Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83717be3 authored by Chris Redpath
Browse files

ANDROID: refactor select_task_rq_fair et al to be cleaner



We can move the energy-aware wakeup functionality around
a little and make it clear that it fits into the slow-path
wakeup code. The resulting code is a very close match to
the shape we expect to push upstream for comment.

Change-Id: Icf5a2ae9bdf6db5977240e5a1f1da2ed41e0fad7
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
parent c0ff131c
Loading
Loading
Loading
Loading
+46 −23
Original line number Diff line number Diff line
@@ -6637,26 +6637,27 @@ static bool cpu_overutilized(int cpu)
	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
}

static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
/*
 * Needs to be called inside rcu_read_lock critical section.
 * sd is a pointer to the sched domain we wish to use for an
 * energy-aware placement option.
 */
static int find_energy_efficient_cpu(struct sched_domain *sd,
				     struct task_struct *p,
				     int cpu, int prev_cpu,
				     int sync)
{
	int i;
	int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
	int energy_cpu = prev_cpu, spare_cpu = prev_cpu;
	unsigned long max_spare = 0;
	struct sched_domain *sd;
	int min_diff = 0;
	int i;

	if (sysctl_sched_sync_hint_enable && sync) {
		int cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
			return cpu;
		}
	}

	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));

	if (!sd)
		goto unlock;

	for_each_cpu_and(i, &p->cpus_allowed, sched_domain_span(sd)) {
		int diff;
		unsigned long spare;
@@ -6689,7 +6690,6 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
		}
	}

unlock:
	if (energy_cpu == prev_cpu && !cpu_overutilized(prev_cpu))
		return prev_cpu;

@@ -6729,6 +6729,14 @@ static inline int wake_energy(struct task_struct *p, int prev_cpu,
	if (sd_overutilized(sd))
		return false;

	/*
	 * we cannot do energy-aware wakeup placement sensibly
	 * for tasks with 0 utilization, so let them be placed
	 * according to the normal strategy.
	 */
	if (unlikely(!task_util(p)))
		return false;

	if(!sched_feat(EAS_PREFER_IDLE)){
		/*
		 * Force prefer-idle tasks into the slow path, this may not happen
@@ -6756,23 +6764,23 @@ static int
select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags,
		    int sibling_count_hint)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	struct sched_domain *tmp, *affine_sd = NULL;
	struct sched_domain *sd = NULL, *energy_sd = NULL;
	int cpu = smp_processor_id();
	int new_cpu = prev_cpu;
	int want_affine = 0;
	int want_energy = 0;
	int sync = wake_flags & WF_SYNC;

	rcu_read_lock();

	if (sd_flag & SD_BALANCE_WAKE) {
		record_wakee(p);
		want_affine = !wake_wide(p, sibling_count_hint) && !wake_cap(p, cpu, prev_cpu)
			      && cpumask_test_cpu(cpu, &p->cpus_allowed);
	}

	if (wake_energy(p, prev_cpu, sd_flag, wake_flags)) {
		new_cpu = select_energy_cpu_brute(p, prev_cpu, sync);
		goto unlock;
		want_energy = wake_energy(p, prev_cpu, sd_flag, wake_flags);
		want_affine = !want_energy &&
			      !wake_wide(p, sibling_count_hint) &&
			      !wake_cap(p, cpu, prev_cpu) &&
			      cpumask_test_cpu(cpu, &p->cpus_allowed);
	}

	for_each_domain(cpu, tmp) {
@@ -6789,9 +6797,22 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
			break;
		}

		/*
		 * If we are able to try an energy-aware wakeup,
		 * select the highest non-overutilized sched domain
		 * which includes this cpu and prev_cpu
		 *
		 * maybe want to not test prev_cpu and only consider
		 * the current one?
		 */
		if (want_energy &&
		    !sd_overutilized(tmp) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
			energy_sd = tmp;

		if (tmp->flags & sd_flag)
			sd = tmp;
		else if (!want_affine)
		else if (!(want_affine || want_energy))
			break;
	}

@@ -6819,10 +6840,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);

	} else {
		if (energy_sd)
			new_cpu = find_energy_efficient_cpu(energy_sd, p, cpu, prev_cpu, sync);
		else
			new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
	}

unlock:
	rcu_read_unlock();

#ifdef CONFIG_NO_HZ_COMMON