Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb11725f authored by Joonwoo Park's avatar Joonwoo Park Committed by Syed Rameez Mustafa
Browse files

sched: restrict sync wakee placement bias with waker's demand



Biasing a sync wakee task towards the waker CPU's cluster makes sense when
the waker's demand is high enough that the wakee can also take advantage
of the high CPU frequency voted for because of the waker's load.  Placing a
sync wakee on a low-demand waker's CPU can lead to placement imbalance,
which can in turn lead to unnecessary migration.

Introduce a new tunable "sched_big_waker_task_load" that defines a big
waker, so the scheduler avoids biasing wakee placement towards the waker's
cluster when the waker's load is below the tunable.

CRs-fixed: 971295
Change-Id: I1550ede0a71ac8c9be74a7daabe164c6a269a3fb
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
parent 3bd5f6f4
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -1292,6 +1292,17 @@ Sync wakee tasks which have demand less than sched_small_wakee_task_load are
categorized as small wakee tasks.  Scheduler places small wakee tasks on the
waker's cluster.


*** 7.26 sched_big_waker_task_load

Appears at: /proc/sys/kernel/sched_big_waker_task_load

Default value: 25

This tunable is a percentage.  It configures the minimum demand of a big
sync waker task.  The scheduler places small wakee tasks woken up by a big
sync waker on the waker's cluster.

=========================
8. HMP SCHEDULER TRACE POINTS
=========================
+1 −0
Original line number Diff line number Diff line
@@ -68,6 +68,7 @@ extern int sysctl_sched_upmigrate_min_nice;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;

#ifdef CONFIG_SCHED_QHMP
extern unsigned int sysctl_sched_min_runtime;
+8 −0
Original line number Diff line number Diff line
@@ -2476,6 +2476,9 @@ unsigned int __read_mostly sysctl_sched_enable_power_aware = 0;
unsigned int __read_mostly sched_small_wakee_task_load;
unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;

unsigned int __read_mostly sched_big_waker_task_load;
unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;

/*
 * CPUs with load greater than the sched_spill_load_threshold are not
 * eligible for task placement. When all CPUs in a cluster achieve a
@@ -2606,6 +2609,10 @@ void set_hmp_defaults(void)
	sched_small_wakee_task_load =
		div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
			  (u64)sched_ravg_window, 100);

	sched_big_waker_task_load =
		div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
			  (u64)sched_ravg_window, 100);
}

u32 sched_get_init_task_load(struct task_struct *p)
@@ -3276,6 +3283,7 @@ static inline bool
wake_to_waker_cluster(struct cpu_select_env *env)
{
	return !env->need_idle && !env->reason && env->sync &&
	       task_load(current) > sched_big_waker_task_load &&
	       task_load(env->p) < sched_small_wakee_task_load;
}

+7 −0
Original line number Diff line number Diff line
@@ -467,6 +467,13 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler   = sched_hmp_proc_update_handler,
	},
	{
		.procname	= "sched_big_waker_task_load",
		.data		= &sysctl_sched_big_waker_task_load_pct,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler   = sched_hmp_proc_update_handler,
	},
#ifdef CONFIG_SCHED_FREQ_INPUT
	{
		.procname       = "sched_new_task_windows",