Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8c2e3d88 authored by Dietmar Eggemann, committed by Andres Oportus
Browse files

ANDROID: sched/fair: Code !is_big_little path into select_energy_cpu_brute()



This patch replaces the existing EAS upstream implementation of
select_energy_cpu_brute() with the one of find_best_target() used
in Android previously.

It also removes the cpumask 'and' from select_energy_cpu_brute,
see the existing use of 'cpu = smp_processor_id()' in
select_task_rq_fair().

Change-Id: If678c002efaa87d1ba3ec9989a4e9f8df98b83ec
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
[ added guarding for non-schedtune builds ]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent 52b09b1f
Loading
Loading
Loading
Loading
+29 −37
Original line number Diff line number Diff line
@@ -6413,65 +6413,57 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)

static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
	int i;
	int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
	unsigned long max_spare = 0;
	struct sched_domain *sd;
	int target_cpu = prev_cpu, tmp_target;
	bool boosted, prefer_idle;

	if (sysctl_sched_sync_hint_enable && sync) {
		int cpu = smp_processor_id();
		cpumask_t search_cpus;
		cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
		if (cpumask_test_cpu(cpu, &search_cpus))

		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
			return cpu;
	}

	rcu_read_lock();
#ifdef CONFIG_CGROUP_SCHEDTUNE
	boosted = schedtune_task_boost(p) > 0;
	prefer_idle = schedtune_prefer_idle(p) > 0;
#else
	boosted = get_sysctl_sched_cfs_boost() > 0;
	prefer_idle = 0;
#endif

	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
	/* Find a cpu with sufficient capacity */
	tmp_target = find_best_target(p, boosted, prefer_idle);

	if (!sd)
		goto unlock;
	if (tmp_target >= 0) {
		target_cpu = tmp_target;
		if ((boosted || prefer_idle) && idle_cpu(target_cpu))
			goto unlock;
	}

	for_each_cpu_and(i, tsk_cpus_allowed(p), sched_domain_span(sd)) {
		int diff;
		unsigned long spare;

	if (target_cpu != prev_cpu) {
		struct energy_env eenv = {
			.util_delta     = task_util(p),
			.src_cpu        = prev_cpu,
			.dst_cpu	= i,
			.dst_cpu        = target_cpu,
			.task           = p,
		};

		spare = capacity_spare_wake(i, p);

		if (i == prev_cpu)
			continue;

		if (spare > max_spare) {
			max_spare = spare;
			spare_cpu = i;
		}

		if (spare * 1024 < capacity_margin * task_util(p))
			continue;

		diff = energy_diff(&eenv);
		/* Not enough spare capacity on previous cpu */
		if (cpu_overutilized(prev_cpu))
			goto unlock;

		if (diff < min_diff) {
			min_diff = diff;
			energy_cpu = i;
		}
		if (energy_diff(&eenv) >= 0)
			target_cpu = prev_cpu;
	}

unlock:
	rcu_read_unlock();

	if (energy_cpu == prev_cpu && !cpu_overutilized(prev_cpu))
		return prev_cpu;

	return energy_cpu != prev_cpu ? energy_cpu : spare_cpu;
	return target_cpu;
}

/*