Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d137b0e authored by Olav Haugan's avatar Olav Haugan
Browse files

sched/fair: Limit sync wakeup bias to waker cpu



Add logic to minimize which sync wakeups get affected by biasing to the
waker cpu. The waking task must have a load of less than 10% while the
waker has to be bigger than 25%.

This helps performance in certain use cases.

Change-Id: I8d7adb0634f78396a9b8b330a1ebce3dec95be8c
Signed-off-by: default avatarOlav Haugan <ohaugan@codeaurora.org>
parent a7bbb875
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
@@ -5563,6 +5563,13 @@ static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
}

static inline bool bias_to_waker_cpu_enabled(struct task_struct *wakee,
		struct task_struct *waker)
{
	return task_util(waker) > sched_big_waker_task_load &&
		task_util(wakee) < sched_small_wakee_task_load;
}

static inline bool
bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
@@ -6917,6 +6924,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
	struct related_thread_group *grp;
	cpumask_t search_cpus;
	int prev_cpu = task_cpu(p);
	struct task_struct *curr = cpu_rq(cpu)->curr;
#ifdef CONFIG_SCHED_CORE_ROTATE
	bool do_rotate = false;
	bool avoid_prev_cpu = false;
@@ -6943,7 +6951,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
	if (grp && grp->preferred_cluster)
		rtg_target = &grp->preferred_cluster->cpus;

	if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
	if (sync && bias_to_waker_cpu_enabled(p, curr) &&
		bias_to_waker_cpu(p, cpu, rtg_target)) {
		trace_sched_task_util_bias_to_waker(p, prev_cpu,
					task_util(p), cpu, cpu, 0, need_idle);
		return cpu;
+12 −0
Original line number Diff line number Diff line
@@ -162,6 +162,13 @@ static const unsigned int top_tasks_bitmap_size =
 */
__read_mostly unsigned int sysctl_sched_freq_reporting_policy;


#define SCHED_BIG_WAKER_TASK_LOAD_PCT 25UL
#define SCHED_SMALL_WAKEE_TASK_LOAD_PCT 10UL

__read_mostly unsigned int sched_big_waker_task_load;
__read_mostly unsigned int sched_small_wakee_task_load;

static int __init set_sched_ravg_window(char *str)
{
	unsigned int window_size;
@@ -3116,4 +3123,9 @@ void walt_sched_init(struct rq *rq)

	walt_cpu_util_freq_divisor =
	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;

	sched_big_waker_task_load =
		(SCHED_BIG_WAKER_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
	sched_small_wakee_task_load =
		(SCHED_SMALL_WAKEE_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
}