Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 562c20cd authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Add policy for restricting prefer_spread to newly idle balance"

parents 28b12c5b 611e7f29
Loading
Loading
Loading
Loading
+49 −32
Original line number Diff line number Diff line
@@ -3909,20 +3909,26 @@ struct find_best_target_env {
	int skip_cpu;
};

static inline bool prefer_spread_on_idle(int cpu)
static inline bool prefer_spread_on_idle(int cpu, bool new_ilb)
{
#ifdef CONFIG_SCHED_WALT
	if (likely(!sysctl_sched_prefer_spread))
	switch (sysctl_sched_prefer_spread) {
	case 1:
		return is_min_capacity_cpu(cpu);
	case 2:
		return true;
	case 3:
		return (new_ilb && is_min_capacity_cpu(cpu));
	case 4:
		return new_ilb;
	default:
		return false;

	if (is_min_capacity_cpu(cpu))
		return sysctl_sched_prefer_spread >= 1;

	return sysctl_sched_prefer_spread > 1;
	}
#else
	return false;
#endif
}

#ifdef CONFIG_SCHED_WALT
static inline void walt_adjust_cpus_for_packing(struct task_struct *p,
				int *target_cpu, int *best_idle_cpu,
@@ -3934,7 +3940,7 @@ static inline void walt_adjust_cpus_for_packing(struct task_struct *p,
	if (*best_idle_cpu == -1 || *target_cpu == -1)
		return;

	if (prefer_spread_on_idle(*best_idle_cpu))
	if (prefer_spread_on_idle(*best_idle_cpu, false))
		fbt_env->need_idle |= 2;

	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
@@ -9159,10 +9165,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	if (sgs->group_type < busiest->group_type)
		return false;

	if (env->prefer_spread && env->idle != CPU_NOT_IDLE &&
		(sgs->sum_nr_running > busiest->sum_nr_running) &&
		(sgs->group_util > busiest->group_util))
	/*
	 * This sg and busiest are classified as the same. When prefer_spread
	 * is true, we want to maximize the chance of pulling tasks, so
	 * prefer to pick the sg with more runnable tasks and break ties
	 * with utilization.
	 */
	if (env->prefer_spread) {
		if (sgs->sum_nr_running < busiest->sum_nr_running)
			return false;
		if (sgs->sum_nr_running > busiest->sum_nr_running)
			return true;
		return sgs->group_util > busiest->group_util;
	}

	if (sgs->avg_load <= busiest->avg_load)
		return false;
@@ -9198,10 +9213,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,

asym_packing:

	if (env->prefer_spread &&
		(sgs->sum_nr_running < busiest->sum_nr_running))
		return false;

	/* This is the busiest node in its class. */
	if (!(env->sd->flags & SD_ASYM_PACKING))
		return true;
@@ -9682,15 +9693,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s

		return fix_small_imbalance(env, sds);
	}

	/*
	 * If we couldn't find any imbalance, then boost the imbalance
	 * with the group util.
	 */
	if (env->prefer_spread && !env->imbalance &&
		env->idle != CPU_NOT_IDLE &&
		busiest->sum_nr_running > busiest->group_weight)
		env->imbalance = busiest->group_util;
}

/******* find_busiest_group() helpers end here *********************/
@@ -9730,7 +9732,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
			int cpu_local, cpu_busiest;
			unsigned long capacity_local, capacity_busiest;

			if (env->idle != CPU_NEWLY_IDLE)
			if (env->idle != CPU_NEWLY_IDLE && !env->prefer_spread)
				goto out_balanced;

			if (!sds.local || !sds.busiest)
@@ -9779,9 +9781,13 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
	/*
	 * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
	 * capacities from resulting in underutilization due to avg_load.
	 *
	 * When prefer_spread is enabled, force the balance even when
	 * busiest group has some capacity but loaded with more than 1
	 * task.
	 */
	if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
	    busiest->group_no_capacity)
	    (busiest->group_no_capacity || env->prefer_spread))
		goto force_balance;

	/* Misfit tasks should be dealt with regardless of the avg load */
@@ -9827,6 +9833,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
	/* Looks like there is an imbalance. Compute it */
	env->src_grp_type = busiest->group_type;
	calculate_imbalance(env, &sds);

	/*
	 * If we couldn't find any imbalance, then boost the imbalance
	 * based on the group util.
	 */
	if (!env->imbalance && env->prefer_spread)
		env->imbalance = (busiest->group_util >> 1);

	trace_sched_load_balance_stats(sds.busiest->cpumask[0],
				busiest->group_type, busiest->avg_load,
				busiest->load_per_task,	sds.local->cpumask[0],
@@ -9936,7 +9950,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
		 * to: load_i * capacity_j >= load_j * capacity_i;  where j is
		 * our previous maximum.
		 */
		if (load * busiest_capacity > busiest_load * capacity) {
		if (load * busiest_capacity >= busiest_load * capacity) {
			busiest_load = load;
			busiest_capacity = capacity;
			busiest = rq;
@@ -10085,7 +10099,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
	};

#ifdef CONFIG_SCHED_WALT
	env.prefer_spread = (prefer_spread_on_idle(this_cpu) &&
	env.prefer_spread = (idle != CPU_NOT_IDLE &&
				prefer_spread_on_idle(this_cpu,
				idle == CPU_NEWLY_IDLE) &&
				!((sd->flags & SD_ASYM_CPUCAPACITY) &&
				 !cpumask_test_cpu(this_cpu,
						 &asym_cap_sibling_cpus)));
@@ -10635,7 +10651,8 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
		max_cost += sd->max_newidle_lb_cost;

#ifdef CONFIG_SCHED_WALT
		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu))
		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu,
					idle == CPU_NEWLY_IDLE))
			continue;
#endif

@@ -10895,7 +10912,7 @@ static void nohz_balancer_kick(struct rq *rq)
	 */
	if (sched_energy_enabled()) {
		if (rq->nr_running >= 2 && (cpu_overutilized(cpu) ||
					prefer_spread_on_idle(cpu)))
					prefer_spread_on_idle(cpu, false)))
			flags = NOHZ_KICK_MASK;
		goto out;
	}
@@ -11304,7 +11321,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
	int pulled_task = 0;
	u64 curr_cost = 0;
	u64 avg_idle = this_rq->avg_idle;
	bool prefer_spread = prefer_spread_on_idle(this_cpu);
	bool prefer_spread = prefer_spread_on_idle(this_cpu, true);
	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
				silver_has_big_tasks() &&
				(atomic_read(&this_rq->nr_iowait) == 0));
+1 −1
Original line number Diff line number Diff line
@@ -552,7 +552,7 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= &two,
		.extra2		= &four,
	},
	{
		.procname	= "walt_rtg_cfs_boost_prio",