
Commit 2582f0eb authored by Nikhil Rao, committed by Ingo Molnar

sched: Set group_imb only if a task can be pulled from the busiest cpu



When cycling through sched groups to determine the busiest group, set
group_imb only if the busiest cpu has more than 1 runnable task. This patch
fixes the case where two cpus in a group have one runnable task each, but there
is a large weight differential between these two tasks. The load balancer is
unable to migrate any task from this group, and hence should not consider this
group to be imbalanced.
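
To make the fix concrete, here is a minimal standalone sketch of the check in plain C. It is a model, not the kernel code: the cpu_stat structure and check_group() helper are hypothetical stand-ins for the scheduler's rq and sg_lb_stats bookkeeping, the load numbers are invented for illustration, and the per-cpu load doubles as the weighted-load sum, a simplification the kernel does not make.

#include <stdio.h>

/* Hypothetical per-cpu sample; stands in for the kernel's struct rq. */
struct cpu_stat {
	unsigned long load;		/* cpu load in the scheduler's fixed-point scale */
	unsigned long nr_running;	/* runnable tasks on this cpu */
};

/* Evaluate the group_imb condition before and after the patch. */
static void check_group(const char *name, const struct cpu_stat *cpu, int n)
{
	unsigned long max_cpu_load = 0, min_cpu_load = ~0UL, max_nr_running = 0;
	unsigned long sum_load = 0, sum_nr_running = 0, avg_load_per_task = 0;
	int i, old_imb, new_imb;

	for (i = 0; i < n; i++) {
		if (cpu[i].load > max_cpu_load) {
			max_cpu_load = cpu[i].load;
			/* Remember how many tasks the busiest cpu runs. */
			max_nr_running = cpu[i].nr_running;
		}
		if (min_cpu_load > cpu[i].load)
			min_cpu_load = cpu[i].load;
		sum_load += cpu[i].load;
		sum_nr_running += cpu[i].nr_running;
	}

	if (sum_nr_running)
		avg_load_per_task = sum_load / sum_nr_running;

	old_imb = (max_cpu_load - min_cpu_load) > 2 * avg_load_per_task;
	/* The fix: only flag imbalance when a task can actually be pulled. */
	new_imb = old_imb && max_nr_running > 1;

	printf("%-16s old group_imb=%d, fixed group_imb=%d\n", name, old_imb, new_imb);
}

int main(void)
{
	/* One heavy task vs. several light ones: nothing can be pulled. */
	struct cpu_stat pinned[2]   = { { 3072, 1 }, { 384, 3 } };
	/* Same loads, but the busiest cpu runs two tasks: one is pullable. */
	struct cpu_stat pullable[2] = { { 3072, 2 }, { 384, 3 } };

	check_group("one heavy task:", pinned, 2);
	check_group("two on busiest:", pullable, 2);
	return 0;
}

The first group trips the old condition but not the fixed one: its busiest cpu has only one task, so nothing can be migrated and flagging the group imbalanced would only make the balancer churn.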

Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286996978-7007-3-git-send-email-ncrao@google.com>
[ small code readability edits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ef8002f6
kernel/sched_fair.c +7 −5
@@ -2378,7 +2378,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
 {
-	unsigned long load, max_cpu_load, min_cpu_load;
+	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
 	int i;
 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
 	unsigned long avg_load_per_task = 0;
@@ -2389,6 +2389,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	/* Tally up the load of all CPUs in the group */
 	max_cpu_load = 0;
 	min_cpu_load = ~0UL;
+	max_nr_running = 0;
 
 	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
 		struct rq *rq = cpu_rq(i);
@@ -2406,8 +2407,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 			load = target_load(i, load_idx);
 		} else {
 			load = source_load(i, load_idx);
-			if (load > max_cpu_load)
+			if (load > max_cpu_load) {
 				max_cpu_load = load;
+				max_nr_running = rq->nr_running;
+			}
 			if (min_cpu_load > load)
 				min_cpu_load = load;
 		}
@@ -2447,11 +2450,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if (sgs->sum_nr_running)
 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity =
-		DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
 }
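
The last hunk also joins the group_capacity assignment onto a single line (the "small code readability edits" noted above). The rounding it performs is easy to demonstrate. Below is a minimal sketch, assuming the kernel's DIV_ROUND_CLOSEST behaviour for positive operands and the SCHED_LOAD_SCALE of 1024 in use at the time; the cpu_power values are illustrative only.

#include <stdio.h>

/* Simplified DIV_ROUND_CLOSEST for positive operands only; the kernel
 * macro also copes with negative values. Rounds to nearest, not down. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

#define SCHED_LOAD_SCALE	1024UL	/* nominal power of one cpu */

int main(void)
{
	/* Illustrative cpu_power values: a full cpu, a reduced-power
	 * SMT sibling, and a very weak group. */
	unsigned long powers[] = { 1024, 589, 400 };
	unsigned int i;

	for (i = 0; i < sizeof(powers) / sizeof(powers[0]); i++) {
		unsigned long cap = DIV_ROUND_CLOSEST(powers[i], SCHED_LOAD_SCALE);
		printf("cpu_power=%4lu -> group_capacity=%lu\n", powers[i], cap);
	}
	return 0;
}

Plain truncating division would give 589/1024 = 0; rounding to nearest keeps such a group at capacity 1. When the result still comes out 0, as for 400 here, the kernel falls back to fix_small_capacity(), as the hunk shows.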