Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 940959e9 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

sched: fixlet for group load balance



We should not only correct the increment for the initial group, but should
be consistent and do so for all the groups we encounter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 63e5c398
Loading
Loading
Loading
Loading
+14 −13
Original line number Diff line number Diff line
@@ -1027,7 +1027,6 @@ static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];
	long more_w;

	if (!tg->parent)
		return wl;
@@ -1039,6 +1038,10 @@ static long effective_load(struct task_group *tg, int cpu,
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
@@ -1047,11 +1050,6 @@ static long effective_load(struct task_group *tg, int cpu,
		wl += more_w;
		wg += more_w;

	for_each_sched_entity(se) {
#define D(n) (likely(n) ? (n) : 1)

		long S, rw, s, a, b;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;
@@ -1059,7 +1057,11 @@ static long effective_load(struct task_group *tg, int cpu,
		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b)/D(b);
		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
@@ -1068,7 +1070,6 @@ static long effective_load(struct task_group *tg, int cpu,
		 * alter the group weight.
		 */
		wg = 0;
#undef D
	}

	return wl;