Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a40d3ce5 authored by Syed Rameez Mustafa's avatar Syed Rameez Mustafa Committed by Gerrit - the friendly Code Review server
Browse files

sched: Avoid unnecessary load balance when tasks don't fit on dst_cpu



When considering pulling over a task that does not fit on the
destination CPU, make sure that the busiest group has exceeded its
capacity. While the change is applicable to all groups, the biggest
impact will be on migrating big tasks to little CPUs. This should
only happen when the big cluster is no longer capable of balancing
load within the cluster. This change should have no impact on single
cluster systems.

Change-Id: I6d1ef0e0d878460530f036921ce4a4a9c1e1394b
Signed-off-by: default avatarSyed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent de724756
Loading
Loading
Loading
Loading
+36 −19
Original line number Diff line number Diff line
@@ -2232,6 +2232,11 @@ static inline int nr_big_tasks(struct rq *rq)

#define sched_enable_power_aware 0

/*
 * Stub for configurations without power-aware placement (see
 * sched_enable_power_aware == 0 above): report that any task fits
 * on any CPU, so capacity-based migration restrictions never apply.
 */
static inline int task_will_fit(struct task_struct *tsk, int dst_cpu)
{
	/* Unconditionally "fits"; tsk and dst_cpu are intentionally unused. */
	return 1;
}

static inline int select_best_cpu(struct task_struct *p, int target, int reason)
{
	return 0;
@@ -5074,6 +5079,8 @@ struct lb_env {
	long			imbalance;
	/* The set of CPUs under consideration for load-balancing */
	struct cpumask		*cpus;
	unsigned int		busiest_grp_capacity;
	unsigned int		busiest_nr_running;

	unsigned int		flags;

@@ -5156,6 +5163,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
		return 0;

	if (!task_will_fit(p, env->dst_cpu) &&
			env->busiest_nr_running <= env->busiest_grp_capacity)
		return 0;

	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
		int cpu;

@@ -5966,9 +5977,11 @@ static inline void update_sd_lb_stats(struct lb_env *env,
		} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
			sds->max_load = sgs.avg_load;
			sds->busiest = sg;
			sds->busiest_nr_running = sgs.sum_nr_running;
			env->busiest_nr_running = sds->busiest_nr_running
							= sgs.sum_nr_running;
			sds->busiest_idle_cpus = sgs.idle_cpus;
			sds->busiest_group_capacity = sgs.group_capacity;
			env->busiest_grp_capacity = sds->busiest_group_capacity
							= sgs.group_capacity;
			sds->busiest_load_per_task = sgs.sum_weighted_load;
			sds->busiest_has_capacity = sgs.group_has_capacity;
			sds->busiest_group_weight = sgs.group_weight;
@@ -6410,6 +6423,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
		.dst_rq			= this_rq,
		.dst_grpmask    	= sched_group_cpus(sd->groups),
		.idle			= idle,
		.busiest_nr_running 	= 0,
		.busiest_grp_capacity 	= 0,
		.loop_break		= sched_nr_migrate_break,
		.cpus			= cpus,
		.flags			= 0,
@@ -6757,6 +6772,8 @@ static int active_load_balance_cpu_stop(void *data)
		.src_cpu		= busiest_rq->cpu,
		.src_rq			= busiest_rq,
		.idle			= CPU_IDLE,
		.busiest_nr_running 	= 0,
		.busiest_grp_capacity 	= 0,
		.flags			= 0,
		.loop			= 0,
	};