Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 54fde738 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Improve the scheduler"

parents 9e4cf237 c47c3c9e
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ extern unsigned int __weak sysctl_sched_coloc_busy_hyst;
extern unsigned int __weak sysctl_sched_coloc_busy_hyst_max_ms;
extern unsigned int __weak sysctl_sched_window_stats_policy;
extern unsigned int __weak sysctl_sched_ravg_window_nr_ticks;
extern unsigned int __weak sysctl_sched_many_wakeup_threshold;

extern int __weak
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
+35 −7
Original line number Diff line number Diff line
@@ -3817,7 +3817,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
}

static inline bool
bias_to_waker_cpu(struct task_struct *p, int cpu, int start_cpu)
bias_to_this_cpu(struct task_struct *p, int cpu, int start_cpu)
{
	bool base_test = cpumask_test_cpu(cpu, &p->cpus_mask) &&
						cpu_active(cpu);
@@ -3884,6 +3884,7 @@ struct find_best_target_env {
	int fastpath;
	int start_cpu;
	bool strict_max;
	int skip_cpu;
};

static inline void adjust_cpus_for_packing(struct task_struct *p,
@@ -6358,6 +6359,12 @@ static inline bool task_skip_min_cpu(struct task_struct *p)
	return sched_boost() != CONSERVATIVE_BOOST &&
		get_rtg_status(p) && p->unfilter;
}

/*
 * is_many_wakeup() - report whether this wakeup is part of a large fan-out.
 *
 * Returns true when @sibling_count_hint (the number of tasks being woken
 * together, as passed down from select_task_rq_fair()) reaches the tunable
 * sysctl_sched_many_wakeup_threshold. Callers in this patch use it to take
 * the MANY_WAKEUP fastpath (keep the task on prev_cpu) and to skip the
 * waker's CPU in find_best_target().
 *
 * NOTE(review): sysctl_sched_many_wakeup_threshold is declared
 * "unsigned int" in the header hunk above, so this is a signed/unsigned
 * comparison — a negative @sibling_count_hint would be promoted to a huge
 * unsigned value and read as "many". Confirm callers never pass a negative
 * hint.
 */
static inline bool is_many_wakeup(int sibling_count_hint)
{
	return sibling_count_hint >= sysctl_sched_many_wakeup_threshold;
}

#else
static inline bool get_rtg_status(struct task_struct *p)
{
@@ -6368,6 +6375,11 @@ static inline bool task_skip_min_cpu(struct task_struct *p)
{
	return false;
}

/*
 * Stub for the #else branch above (presumably !CONFIG_SCHED_WALT — confirm
 * against the full file): the many-wakeup fastpath is compiled out, so a
 * wakeup is never classified as "many" and callers fall through to the
 * normal placement path.
 */
static inline bool is_many_wakeup(int sibling_count_hint)
{
	return false;
}
#endif

static int get_start_cpu(struct task_struct *p)
@@ -6418,6 +6430,7 @@ enum fastpaths {
	NONE = 0,
	SYNC_WAKEUP,
	PREV_CPU_FASTPATH,
	MANY_WAKEUP,
};

static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
@@ -6507,6 +6520,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
			if (sched_cpu_high_irqload(i))
				continue;

			if (fbt_env->skip_cpu == i)
				continue;

			/*
			 * p's blocked utilization is still accounted for on prev_cpu
			 * so prev_cpu will receive a negative bias due to the double
@@ -6971,7 +6987,8 @@ static DEFINE_PER_CPU(cpumask_t, energy_cpus);
 * other use-cases too. So, until someone finds a better way to solve this,
 * let's keep things simple by re-using the existing slow path.
 */
int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
				     int sync, int sibling_count_hint)
{
	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -7013,10 +7030,17 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
	if (need_idle)
		sync = 0;

	if (sync && bias_to_waker_cpu(p, cpu, start_cpu)) {
	if (sync && bias_to_this_cpu(p, cpu, start_cpu)) {
		best_energy_cpu = cpu;
		fbt_env.fastpath = SYNC_WAKEUP;
		goto sync_wakeup;
		goto done;
	}

	if (is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
				bias_to_this_cpu(p, prev_cpu, start_cpu)) {
		best_energy_cpu = prev_cpu;
		fbt_env.fastpath = MANY_WAKEUP;
		goto done;
	}

	rcu_read_lock();
@@ -7046,6 +7070,8 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
		fbt_env.boosted = boosted;
		fbt_env.strict_max = is_rtg &&
			(task_boost == TASK_BOOST_STRICT_MAX);
		fbt_env.skip_cpu = is_many_wakeup(sibling_count_hint) ?
				   cpu : -1;

		find_best_target(NULL, candidates, p, &fbt_env);

@@ -7196,7 +7222,7 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
	    (capacity_orig_of(prev_cpu) <= capacity_orig_of(start_cpu)))
		best_energy_cpu = prev_cpu;

sync_wakeup:
done:
	trace_sched_task_util(p, cpumask_bits(candidates)[0], best_energy_cpu,
			sync, need_idle, fbt_env.fastpath, placement_boost,
			start_t, boosted, is_rtg, get_rtg_status(p), start_cpu);
@@ -7234,7 +7260,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f

	if (sched_energy_enabled()) {
		rcu_read_lock();
		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync,
						    sibling_count_hint);
		if (unlikely(new_cpu < 0))
			new_cpu = prev_cpu;
		rcu_read_unlock();
@@ -7245,7 +7272,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
		record_wakee(p);

		if (sched_energy_enabled()) {
			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync,
							    sibling_count_hint);
			if (new_cpu >= 0)
				return new_cpu;
			new_cpu = prev_cpu;
+1 −1
Original line number Diff line number Diff line
@@ -3377,7 +3377,7 @@ static inline int group_balance_cpu_not_isolated(struct sched_group *sg)
#endif /* CONFIG_SMP */

extern int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
								int sync);
					int sync, int sibling_count_hint);
extern int active_load_balance_cpu_stop(void *data);

#ifdef CONFIG_HOTPLUG_CPU
+9 −0
Original line number Diff line number Diff line
@@ -418,6 +418,15 @@ static struct ctl_table kern_table[] = {
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		/*
		 * /proc/sys/kernel/sched_many_wakeup_threshold: minimum
		 * sibling_count_hint for is_many_wakeup() to treat a wakeup
		 * as a large fan-out. Clamped to [2, 1000] via extra1/extra2.
		 *
		 * NOTE(review): .data points at an "unsigned int" (see the
		 * extern in the header hunk) but the handler is the signed
		 * proc_dointvec_minmax; proc_douintvec_minmax would match
		 * the type exactly. Harmless here since the range is
		 * positive — confirm intent.
		 */
		.procname	= "sched_many_wakeup_threshold",
		.data		= &sysctl_sched_many_wakeup_threshold,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &two,
		.extra2		= &one_thousand,
	},
	{
		.procname	= "sched_walt_rotate_big_tasks",
		.data		= &sysctl_sched_walt_rotate_big_tasks,