Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 205041be authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Always wake up on waker cpu for sync wakeups"

parents 2d7dfcc3 0a80c6f6
Loading
Loading
Loading
Loading
+13 −60
Original line number Diff line number Diff line
@@ -5406,28 +5406,12 @@ static inline int task_util(struct task_struct *p)
	return p->se.avg.util_avg;
}

/*
 * Compile-time gate for the waker/wakee sibling-placement heuristic.
 * Defined to 0, so wake_on_waker_sibling() below is constant-false and
 * the whole heuristic is disabled (the compiler can elide the calls).
 */
#define SCHED_ENABLE_WAKER_WAKEE	0

/*
 * Utilization thresholds for the heuristic, expressed relative to max
 * capacity (per the existing comments: 102 ~= 10%, 256 = 25%).
 */
static unsigned int sched_small_wakee_task_util = 102; /* ~10% of max cap */
static unsigned int sched_big_waker_task_util = 256;  /* 25% of max cap */

/*
 * Return true when the wakee @p is a small task being woken by a big
 * waker (current) -- i.e. a candidate for placement near the waker.
 * NOTE(review): SCHED_ENABLE_WAKER_WAKEE is 0 above, so this always
 * evaluates to false; the && chain short-circuits before the
 * task_util() reads.
 */
static inline bool
wake_on_waker_sibling(struct task_struct *p)
{
	return SCHED_ENABLE_WAKER_WAKEE &&
	       task_util(current) > sched_big_waker_task_util &&
	       task_util(p) < sched_small_wakee_task_util;
}

#define sysctl_sched_prefer_sync_wakee_to_waker 0

/*
 * bias_to_waker_cpu() - may a sync wakee @p be placed on the waker's CPU?
 * @p:   task being woken up
 * @cpu: the waker's CPU
 *
 * Returns true when @cpu is allowed by @p's affinity mask, is active,
 * is not isolated, and has sufficient capacity for @p (task_fits_max()).
 * Callers on the sync-wakeup path use this to short-circuit placement
 * and wake @p directly on the waker's CPU.
 *
 * Fix: the previous text contained two return statements back to back
 * (a diff-merge artifact) -- the first, gated by the constant-0
 * sysctl_sched_prefer_sync_wakee_to_waker macro and an nr_running == 1
 * check, made the second unreachable.  Only the intended final body is
 * kept, matching the "always wake up on waker cpu for sync wakeups"
 * change this code belongs to.
 */
static inline bool
bias_to_waker_cpu(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
	       cpu_active(cpu) && !cpu_isolated(cpu) &&
	       task_fits_max(p, cpu);
}

static int calc_util_delta(struct energy_env *eenv, int cpu)
@@ -6738,10 +6722,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
	unsigned int target_cpu_util = UINT_MAX;
	long target_cpu_new_util_cum = LONG_MAX;
	struct cpumask *rtg_target = NULL;
	bool wake_on_sibling = false;
	int isolated_candidate = -1;
	bool need_idle;
	bool skip_ediff = false;
	enum sched_boost_policy placement_boost = task_sched_boost(p) ?
				sched_boost_policy() : SCHED_BOOST_NONE;

@@ -6754,10 +6736,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
	sg_target = sg;

	sync = sync && sysctl_sched_sync_hint_enable;

	curr_util = boosted_task_util(cpu_rq(cpu)->curr);

	need_idle = wake_to_idle(p);

	if (sync && bias_to_waker_cpu(p, cpu)) {
		trace_sched_task_util_bias_to_waker(p, task_cpu(p),
					task_util(p), cpu, cpu, 0, need_idle);
		return cpu;
	}

	if (sysctl_sched_is_big_little) {
		struct related_thread_group *grp;

@@ -6765,17 +6754,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
		grp = task_related_thread_group(p);
		rcu_read_unlock();

		if (grp && grp->preferred_cluster) {
		if (grp && grp->preferred_cluster)
			rtg_target = &grp->preferred_cluster->cpus;
		} else if (sync && wake_on_waker_sibling(p)) {
			if (bias_to_waker_cpu(p, cpu)) {
				trace_sched_task_util_bias_to_waker(p,
						task_cpu(p), task_util(p), cpu,
						cpu, 0, need_idle);
				return cpu;
			}
			wake_on_sibling = true;
		}

		task_util_boosted = boosted_task_util(p);

@@ -6826,21 +6806,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
							     rtg_target))
						break;
					continue;
				} else if (wake_on_sibling) {
					/* Skip non-sibling CPUs */
					if (!cpumask_test_cpu(cpu,
							sched_group_cpus(sg)))
						continue;
				} else if (sync && curr_util >=
						   task_util_boosted) {
					if (cpumask_test_cpu(cpu,
							sched_group_cpus(sg))) {
						if (!cpumask_test_cpu(task_cpu(p),
								      sched_group_cpus(sg)))
							skip_ediff = true;
						break;
					}
					continue;
				}

				target_max_cap = capacity_of(max_cap_cpu);
@@ -6909,8 +6874,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
				       idle_get_state_idx(cpu_rq(i));

			if (!need_idle &&
			    (!wake_on_sibling ||
			     (wake_on_sibling && i != cpu)) &&
			    add_capacity_margin(new_util_cum) <
			    capacity_curr_of(i)) {
				if (sysctl_sched_cstate_aware) {
@@ -6944,9 +6907,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
					target_cpu = i;
					break;
				}
			} else if (!need_idle &&
				   (!wake_on_sibling ||
				    (wake_on_sibling && i != cpu))) {
			} else if (!need_idle) {
				/*
				 * At least one CPU other than target_cpu is
				 * going to raise CPU's OPP higher than current
@@ -7017,13 +6978,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
		}
	}

	if (wake_on_sibling && target_cpu != -1) {
		trace_sched_task_util_bias_to_waker(p, task_cpu(p),
						task_util(p), target_cpu,
						target_cpu, 0, need_idle);
		return target_cpu;
	}

	if (target_cpu != task_cpu(p) && !cpu_isolated(task_cpu(p))) {
		struct energy_env eenv = {
			.util_delta	= task_util(p),
@@ -7059,7 +7013,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
			return target_cpu;
		}

		if (!skip_ediff)
		ediff = energy_diff(&eenv);

		if (!sysctl_sched_cstate_aware) {