Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 692aa6b9, authored by Maria Yu, committed by Gerrit - the friendly Code Review server
Browse files

sched/fair: skip the waker's CPU for wakeups with many siblings



Introduce a sched_many_wakeup_threshold sysctl (range 2..1000, default handled
via sysctl_sched_many_wakeup_threshold). When a wakeup's sibling_count_hint
meets this threshold, find_energy_efficient_cpu() records the current CPU in
fbt_env.skip_cpu so that find_best_target() excludes it from candidate
selection, spreading large batches of sibling wakeups away from the waker's CPU.

Change-Id: I9216f9316e2bad067c10762de8d67912826b7bc7
Signed-off-by: Maria Yu <aiquny@codeaurora.org>
Co-developed-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[pkondeti@codeaurora.org: skip_cpu argument is implemented for fbt]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: port to 5.4 and fix trivial merge conflicts]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent aa8b37ba
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ extern unsigned int __weak sysctl_sched_coloc_busy_hyst;
extern unsigned int __weak sysctl_sched_coloc_busy_hyst_max_ms;
extern unsigned int __weak sysctl_sched_window_stats_policy;
extern unsigned int __weak sysctl_sched_ravg_window_nr_ticks;
extern unsigned int __weak sysctl_sched_many_wakeup_threshold;

extern int __weak
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
+23 −3
Original line number Diff line number Diff line
@@ -3884,6 +3884,7 @@ struct find_best_target_env {
	int fastpath;
	int start_cpu;
	bool strict_max;
	int skip_cpu;
};

static inline void adjust_cpus_for_packing(struct task_struct *p,
@@ -6358,6 +6359,12 @@ static inline bool task_skip_min_cpu(struct task_struct *p)
	return sched_boost() != CONSERVATIVE_BOOST &&
		get_rtg_status(p) && p->unfilter;
}

/*
 * True when this wakeup carries "many" sibling wakeups, i.e. when the
 * caller's sibling_count_hint meets the sched_many_wakeup_threshold
 * sysctl. find_energy_efficient_cpu() uses this to set fbt_env.skip_cpu
 * to the current CPU so that find_best_target() excludes it from
 * candidate selection.
 *
 * NOTE(review): sibling_count_hint is a signed int while the threshold
 * sysctl is unsigned int, so the comparison is performed as unsigned; a
 * negative hint would convert to a huge value and return true — confirm
 * callers never pass a negative hint.
 */
static inline bool is_many_wakeup(int sibling_count_hint)
{
	return sibling_count_hint >= sysctl_sched_many_wakeup_threshold;
}

#else
static inline bool get_rtg_status(struct task_struct *p)
{
@@ -6368,6 +6375,11 @@ static inline bool task_skip_min_cpu(struct task_struct *p)
{
	return false;
}

/*
 * Stub used when the many-wakeup sysctl support is compiled out: no
 * wakeup is ever classified as a "many sibling" wakeup, so the caller
 * never sets a skip_cpu.
 */
static inline bool is_many_wakeup(int sibling_count_hint)
{
	(void)sibling_count_hint;	/* unused in this configuration */
	return false;
}
#endif

static int get_start_cpu(struct task_struct *p)
@@ -6507,6 +6519,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
			if (sched_cpu_high_irqload(i))
				continue;

			if (fbt_env->skip_cpu == i)
				continue;

			/*
			 * p's blocked utilization is still accounted for on prev_cpu
			 * so prev_cpu will receive a negative bias due to the double
@@ -6971,7 +6986,8 @@ static DEFINE_PER_CPU(cpumask_t, energy_cpus);
 * other use-cases too. So, until someone finds a better way to solve this,
 * let's keep things simple by re-using the existing slow path.
 */
int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
				     int sync, int sibling_count_hint)
{
	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -7046,6 +7062,8 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
		fbt_env.boosted = boosted;
		fbt_env.strict_max = is_rtg &&
			(task_boost == TASK_BOOST_STRICT_MAX);
		fbt_env.skip_cpu = is_many_wakeup(sibling_count_hint) ?
				   cpu : -1;

		find_best_target(NULL, candidates, p, &fbt_env);

@@ -7234,7 +7252,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f

	if (sched_energy_enabled()) {
		rcu_read_lock();
		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync,
						    sibling_count_hint);
		if (unlikely(new_cpu < 0))
			new_cpu = prev_cpu;
		rcu_read_unlock();
@@ -7245,7 +7264,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
		record_wakee(p);

		if (sched_energy_enabled()) {
			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync,
							    sibling_count_hint);
			if (new_cpu >= 0)
				return new_cpu;
			new_cpu = prev_cpu;
+1 −1
Original line number Diff line number Diff line
@@ -3377,7 +3377,7 @@ static inline int group_balance_cpu_not_isolated(struct sched_group *sg)
#endif /* CONFIG_SMP */

extern int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
								int sync);
					int sync, int sibling_count_hint);
extern int active_load_balance_cpu_stop(void *data);

#ifdef CONFIG_HOTPLUG_CPU
+9 −0
Original line number Diff line number Diff line
@@ -418,6 +418,15 @@ static struct ctl_table kern_table[] = {
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "sched_many_wakeup_threshold",
		.data		= &sysctl_sched_many_wakeup_threshold,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &two,
		.extra2		= &one_thousand,
	},
	{
		.procname	= "sched_walt_rotate_big_tasks",
		.data		= &sysctl_sched_walt_rotate_big_tasks,