Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d0eb1f35 authored by Quentin Perret
Browse files

ANDROID: sched/fair: Make the EAS wake-up prefer-idle aware



Make the mainline EAS wake-up path aware of prefer idle tasks in
preparation for disabling find_best_target().

What is done in the mainline algorithm isn't strictly equivalent to the
find_best_target() algorithm but comes real close, and isn't very
invasive. The main differences with the original find_best_target()
behaviour are the following:

 1. the policy for prefer idle when there isn't a single idle CPU in the
    system is simpler now. We just pick the CPU with the highest spare
    capacity;

 2. the cstate awareness for prefer idle is implemented by minimizing
    the exit latency rather than the idle state index. This is how it is
    done in the slow path (find_idlest_group_cpu()), it doesn't require
    us to keep hooks into CPUIdle, and should actually be better because
    what we want is a CPU that can wake up quickly;

 3. non-prefer-idle tasks just use the standard mainline energy-aware
    wake-up path, which decides the placement using the Energy Model.

Bug: 120440300
Change-Id: I57769c90c57115f6a28d27c5a88e08aa93a30a56
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent 191472a4
Loading
Loading
Loading
Loading
+42 −4
Original line number Diff line number Diff line
@@ -6925,11 +6925,18 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
	return energy;
}

/*
 * select_cpu_candidates - collect wake-up candidate CPUs for @p into @cpus.
 *
 * For !prefer_idle tasks, behaviour is unchanged: the CPU with the maximum
 * spare capacity in each performance domain is added to @cpus, and the
 * energy-aware wake-up path picks among them using the Energy Model.
 *
 * For prefer_idle tasks, exactly one candidate is chosen: the best idle CPU
 * if any exists, otherwise the busy CPU with the highest spare capacity
 * (defaulting to @prev_cpu if none has spare capacity).
 *
 * NOTE(review): this is a rendered diff, so both the old signature
 * (select_max_spare_cap_cpus) and the new one appear below, and part of the
 * loop body is elided at the "@@" hunk boundary.
 */
static void select_max_spare_cap_cpus(struct sched_domain *sd, cpumask_t *cpus,
		struct perf_domain *pd, struct task_struct *p)
static void select_cpu_candidates(struct sched_domain *sd, cpumask_t *cpus,
		struct perf_domain *pd, struct task_struct *p, int prev_cpu)
{
	/* Fall back to @prev_cpu if no busy CPU turns out to have spare cap. */
	int highest_spare_cap_cpu = prev_cpu, best_idle_cpu = -1;
	unsigned long spare_cap, max_spare_cap, util, cpu_cap;
	bool prefer_idle = schedtune_prefer_idle(p);
	bool boosted = schedtune_task_boost(p) > 0;
	/*
	 * Boosted tasks search upward for the highest-capacity idle CPU
	 * (start at 0); non-boosted tasks search downward for the
	 * lowest-capacity one (start at ULONG_MAX).
	 */
	unsigned long target_cap = boosted ? 0 : ULONG_MAX;
	unsigned long highest_spare_cap = 0;
	unsigned int min_exit_lat = UINT_MAX;
	int cpu, max_spare_cap_cpu;
	struct cpuidle_state *idle;

	for (; pd; pd = pd->next) {
		max_spare_cap_cpu = -1;
@@ -6954,11 +6961,42 @@ static void select_max_spare_cap_cpus(struct sched_domain *sd, cpumask_t *cpus,
				max_spare_cap = spare_cap;
				max_spare_cap_cpu = cpu;
			}

			/* Everything below only matters for prefer-idle tasks. */
			if (!prefer_idle)
				continue;

			if (idle_cpu(cpu)) {
				cpu_cap = capacity_orig_of(cpu);
				/* Skip CPUs that move target_cap the wrong way. */
				if (boosted && cpu_cap < target_cap)
					continue;
				if (!boosted && cpu_cap > target_cap)
					continue;
				/*
				 * Among idle CPUs of equal (current target)
				 * capacity, minimize the idle-state exit
				 * latency: a CPU that wakes up faster is a
				 * better prefer-idle target than one in a
				 * deeper C-state.
				 */
				idle = idle_get_state(cpu_rq(cpu));
				if (idle && idle->exit_latency > min_exit_lat &&
						cpu_cap == target_cap)
					continue;

				if (idle)
					min_exit_lat = idle->exit_latency;
				target_cap = cpu_cap;
				best_idle_cpu = cpu;
			} else if (spare_cap > highest_spare_cap) {
				/* Busy-CPU fallback in case nothing is idle. */
				highest_spare_cap = spare_cap;
				highest_spare_cap_cpu = cpu;
			}
		}

		if (max_spare_cap_cpu >= 0)
		if (!prefer_idle && max_spare_cap_cpu >= 0)
			cpumask_set_cpu(max_spare_cap_cpu, cpus);
	}

	if (!prefer_idle)
		return;

	/* Prefer-idle: emit a single candidate, idle if at all possible. */
	if (best_idle_cpu >= 0)
		cpumask_set_cpu(best_idle_cpu, cpus);
	else
		cpumask_set_cpu(highest_spare_cap_cpu, cpus);
}

/*
 * Per-CPU scratch cpumask used to hold the candidate CPUs computed by
 * find_best_target()/select_cpu_candidates() during an EAS wake-up.
 */
static DEFINE_PER_CPU(cpumask_t, energy_cpus);
@@ -7045,7 +7083,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
	if (sched_feat(FIND_BEST_TARGET))
		find_best_target(sd, candidates, p);
	else
		select_max_spare_cap_cpus(sd, candidates, pd, p);
		select_cpu_candidates(sd, candidates, pd, p, prev_cpu);

	/* Bail out if no candidate was found. */
	weight = cpumask_weight(candidates);