Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 308ef5aa authored by Pavankumar Kondeti's avatar Pavankumar Kondeti Committed by Satya Durga Srinivasu Prabhala
Browse files

sched/fair: Consider an idle CPU outside c-state as an active CPU



The find_best_target() selects an active CPU and an idle CPU as
two candidate CPUs. Whichever CPU saves the most energy compared
to the previous CPU is finally selected. An idle CPU — i.e. one
with no runnable tasks that is also outside c-state — is a good
candidate to run the waking task, since the task can run
immediately and there is no idle exit latency. Hence, consider
such a CPU as an active CPU, which helps both power and
performance.

Change-Id: I34f40c2dbca70995a8e6b4a8d5876f802bc000bc
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: Bring in is_packing_eligible functionality
from msm-4.9 to check whether packing can be done or not as part of
dependency]
Signed-off-by: default avatarSatya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent a2201034
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -3118,6 +3118,8 @@ unsigned long long task_sched_runtime(struct task_struct *p)
	return ns;
}

unsigned int capacity_margin_freq = 1280; /* ~20% margin: util scaled by 1280/1024 must stay under capacity, i.e. util < ~80% of capacity */

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
+39 −5
Original line number Diff line number Diff line
@@ -7003,6 +7003,38 @@ struct find_best_target_env {
	bool need_idle;
};

/*
 * is_packing_eligible() - decide whether the waking task @p may be
 * packed onto @target_cpu (the chosen active CPU) instead of waking
 * the best idle CPU out of its c-state.
 *
 * Returns false when the placement explicitly asked for an idle CPU
 * or a boosted placement, or when no idle-cstate candidate exists
 * (best_idle_cstate == -1) so there is nothing to pack away from.
 * With more than one active candidate CPU, packing is always allowed.
 * With exactly one active CPU, packing is allowed only if that CPU's
 * projected utilization (plus margin) still fits within its current
 * capacity.
 */
static bool is_packing_eligible(struct task_struct *p, int target_cpu,
				struct find_best_target_env *fbt_env,
				unsigned int target_cpus_count,
				int best_idle_cstate)
{
	unsigned long demand, projected_util;

	/* Idle-required or boosted placements never pack. */
	if (fbt_env->need_idle || fbt_env->placement_boost)
		return false;

	/* No idle candidate in a c-state was found; nothing to gain. */
	if (best_idle_cstate == -1)
		return false;

	/* Multiple (or zero) active candidates: packing is fine. */
	if (target_cpus_count != 1)
		return true;

	/*
	 * If the task's demand is already accounted in the CPU's
	 * cumulative window, do not add it a second time.
	 */
	demand = task_in_cum_window_demand(cpu_rq(target_cpu), p) ?
			0 : task_util(p);

	projected_util = add_capacity_margin(cpu_util_cum(target_cpu, demand),
					     target_cpu);

	/*
	 * Single active CPU: pack only if it can absorb the task's
	 * demand (with margin) within its current capacity.
	 */
	return projected_util <= capacity_curr_of(target_cpu);
}

static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
			   struct cpumask *rtg_target)
{
@@ -7047,6 +7079,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	int target_cpu = -1;
	int cpu, i;
	unsigned long spare_cap;
	unsigned int active_cpus_count = 0;

	*backup_cpu = -1;

@@ -7270,6 +7303,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			 * capacity.
			 */

			active_cpus_count++;

			/* Favor CPUs with maximum spare capacity */
			if ((capacity_orig - new_util) < target_max_spare_cap)
				continue;
@@ -7290,12 +7325,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

	} while (sg = sg->next, sg != sd->groups);

	if (fbt_env->need_idle || fbt_env->placement_boost) {
		if (best_idle_cpu != -1) {
	if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
					active_cpus_count, best_idle_cstate)) {
		target_cpu = best_idle_cpu;
		best_idle_cpu = -1;
	}
	}

	/*
	 * For non latency sensitive tasks, cases B and C in the previous loop,