
Commit 611e7f29 authored by Satya Durga Srinivasu Prabhala, committed by Pavankumar Kondeti

sched/fair: Add policy for restricting prefer_spread to newly idle balance



Add policy for restricting prefer_spread to newly idle load balance
by expanding the tunable range.

To allow lower capacity CPUs to do aggressive newly idle load balance:
echo 3 > /proc/sys/kernel/sched_prefer_spread

To allow both lower capacity and higher capacity CPUs to do
aggressive newly idle load balance:
echo 4 > /proc/sys/kernel/sched_prefer_spread
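
For reference, the full range of the tunable after this change, as an editorial summary derived from the prefer_spread_on_idle() switch in the diff below (not part of the original commit message):

/*
 * sched_prefer_spread policy values after this patch:
 *   0 - disabled (default); no aggressive spreading
 *   1 - aggressive idle load balance on min-capacity CPUs only
 *   2 - aggressive idle load balance on all CPUs
 *   3 - aggressive newly idle load balance on min-capacity CPUs only
 *   4 - aggressive newly idle load balance on all CPUs
 */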

Change-Id: Ia62ddb29bdf592a956a9688f277178ef71dee1b3
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
[pkondeti@codeaurora.org: The tunable range is expanded]
Co-developed-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 538a6d66
kernel/sched/fair.c  +20 −12
@@ -3909,20 +3909,26 @@ struct find_best_target_env {
 	int skip_cpu;
 };

-static inline bool prefer_spread_on_idle(int cpu)
+static inline bool prefer_spread_on_idle(int cpu, bool new_ilb)
 {
 #ifdef CONFIG_SCHED_WALT
-	if (likely(!sysctl_sched_prefer_spread))
+	switch (sysctl_sched_prefer_spread) {
+	case 1:
+		return is_min_capacity_cpu(cpu);
+	case 2:
+		return true;
+	case 3:
+		return (new_ilb && is_min_capacity_cpu(cpu));
+	case 4:
+		return new_ilb;
+	default:
 		return false;
-
-	if (is_min_capacity_cpu(cpu))
-		return sysctl_sched_prefer_spread >= 1;
-
-	return sysctl_sched_prefer_spread > 1;
+	}
 #else
 	return false;
 #endif
 }

 #ifdef CONFIG_SCHED_WALT
 static inline void walt_adjust_cpus_for_packing(struct task_struct *p,
 				int *target_cpu, int *best_idle_cpu,
@@ -3934,7 +3940,7 @@ static inline void walt_adjust_cpus_for_packing(struct task_struct *p,
 	if (*best_idle_cpu == -1 || *target_cpu == -1)
 		return;

-	if (prefer_spread_on_idle(*best_idle_cpu))
+	if (prefer_spread_on_idle(*best_idle_cpu, false))
 		fbt_env->need_idle |= 2;

 	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
@@ -10082,7 +10088,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,

 #ifdef CONFIG_SCHED_WALT
 	env.prefer_spread = (idle != CPU_NOT_IDLE &&
-				prefer_spread_on_idle(this_cpu) &&
+				prefer_spread_on_idle(this_cpu,
+				idle == CPU_NEWLY_IDLE) &&
 				!((sd->flags & SD_ASYM_CPUCAPACITY) &&
 				 !cpumask_test_cpu(this_cpu,
 						 &asym_cap_sibling_cpus)));
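
The idle == CPU_NEWLY_IDLE test above relies on enum cpu_idle_type; a brief editorial reminder of its values (standard kernel definitions, not part of this patch):

/*
 * enum cpu_idle_type, for context:
 *   CPU_IDLE       - periodic load balance run on an already-idle CPU
 *   CPU_NOT_IDLE   - periodic load balance run on a busy CPU
 *   CPU_NEWLY_IDLE - balance invoked from newidle_balance() as the CPU
 *                    is about to go idle; this is what new_ilb tracks
 */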
@@ -10632,7 +10639,8 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 		max_cost += sd->max_newidle_lb_cost;

 #ifdef CONFIG_SCHED_WALT
-		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu))
+		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu,
+					idle == CPU_NEWLY_IDLE))
 			continue;
 #endif

@@ -10892,7 +10900,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 */
 	if (sched_energy_enabled()) {
 		if (rq->nr_running >= 2 && (cpu_overutilized(cpu) ||
-					prefer_spread_on_idle(cpu)))
+					prefer_spread_on_idle(cpu, false)))
 			flags = NOHZ_KICK_MASK;
 		goto out;
 	}
@@ -11301,7 +11309,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 	u64 avg_idle = this_rq->avg_idle;
-	bool prefer_spread = prefer_spread_on_idle(this_cpu);
+	bool prefer_spread = prefer_spread_on_idle(this_cpu, true);
 	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
 				silver_has_big_tasks() &&
 				(atomic_read(&this_rq->nr_iowait) == 0));
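
Taken together, the call sites above pass new_ilb as follows (an editorial summary of the hunks, not text from the commit):

/*
 * new_ilb at each prefer_spread_on_idle() call site:
 *   walt_adjust_cpus_for_packing() -> false (task placement, not a balance pass)
 *   load_balance()                 -> idle == CPU_NEWLY_IDLE
 *   rebalance_domains()            -> idle == CPU_NEWLY_IDLE
 *   nohz_balancer_kick()           -> false (nohz kick path, never newly idle)
 *   newidle_balance()              -> true (newly idle by definition)
 */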
kernel/sysctl.c  +1 −1
@@ -552,7 +552,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
-		.extra2		= &two,
+		.extra2		= &four,
 	},
 	{
 		.procname	= "walt_rtg_cfs_boost_prio",
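
For readers unfamiliar with ctl_table: .extra1/.extra2 bound what proc_dointvec_minmax() accepts, so raising .extra2 from &two to &four is what opens up values 3 and 4. A minimal sketch of the full sysctl entry, assuming the usual field pattern (.data and .maxlen are inferred from convention; only the fields visible in the hunk above are confirmed by this diff):

static int four = 4;

{
	.procname	= "sched_prefer_spread",
	.data		= &sysctl_sched_prefer_spread,	/* assumed .data target */
	.maxlen		= sizeof(unsigned int),		/* assumed width */
	.mode		= 0644,
	.proc_handler	= proc_dointvec_minmax,
	.extra1		= SYSCTL_ZERO,	/* writes clamped to [0, 4] */
	.extra2		= &four,
},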