
Commit 4d8d595d authored by Peter Zijlstra, committed by Ingo Molnar

sched: update aggregate when holding the RQs



It was observed that in __update_group_shares_cpu()

  rq_weight > aggregate()->rq_weight

This is caused by forks/wakeups occurring between the initial aggregate
pass and the locking of the RQs for load balancing. To avoid this
situation, partially re-do the aggregation once we have the RQs locked
(which prevents new tasks from appearing).
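
[Editor's note: the underlying pattern is a classic check-then-act race:
the aggregate is computed before the runqueue locks are taken, and
forks/wakeups can change rq_weight before the locked section runs. Below
is a minimal user-space sketch of the fix, with hypothetical names and
pthreads standing in for runqueue locks; it is not the kernel code itself.]

#include <pthread.h>

/* Hypothetical stand-ins for the scheduler's per-CPU state. */
struct rq_stats {
	pthread_mutex_t lock;
	unsigned long weight;		/* live value; changes on fork/wakeup */
};

static unsigned long aggregate_weight;	/* cached aggregate; can go stale */

/* Snapshot the weight. Cheap when done unlocked, but a fork/wakeup can
 * invalidate the snapshot immediately afterwards. */
static void aggregate_pass(struct rq_stats *rq)
{
	aggregate_weight = rq->weight;
}

static void balance(struct rq_stats *rq)
{
	aggregate_pass(rq);		/* initial, unlocked pass */

	pthread_mutex_lock(&rq->lock);
	/*
	 * Mirrors the patch: with the lock held the weight can no longer
	 * change, so redo the aggregation before using it. Without this,
	 * rq->weight > aggregate_weight is possible.
	 */
	aggregate_pass(rq);

	/* ... balance using a now-consistent aggregate_weight ... */
	pthread_mutex_unlock(&rq->lock);
}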

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b6a86c74
+20 −0
@@ -1721,6 +1721,11 @@ aggregate_get_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 	aggregate_group_set_shares(tg, cpu, sd);
 }
 
+static void
+aggregate_get_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
+{
+}
+
 static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
 
 static void __init init_aggregate(void)
@@ -1740,6 +1745,11 @@ static int get_aggregate(int cpu, struct sched_domain *sd)
 	return 1;
 }
 
+static void update_aggregate(int cpu, struct sched_domain *sd)
+{
+	aggregate_walk_tree(aggregate_get_down, aggregate_get_nop, cpu, sd);
+}
+
 static void put_aggregate(int cpu, struct sched_domain *sd)
 {
 	spin_unlock(&per_cpu(aggregate_lock, cpu));
@@ -1761,6 +1771,10 @@ static inline int get_aggregate(int cpu, struct sched_domain *sd)
 	return 0;
 }
 
+static inline void update_aggregate(int cpu, struct sched_domain *sd)
+{
+}
+
 static inline void put_aggregate(int cpu, struct sched_domain *sd)
 {
 }
@@ -2192,6 +2206,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
+	/*
+	 * now that we have both rqs locked the rq weight won't change
+	 * anymore - so update the stats.
+	 */
+	update_aggregate(this_cpu, sd);
+
 	do {
 		unsigned long load, avg_load;
 		int local_group;
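
[Editor's note: update_aggregate() reuses the group-hierarchy walk with
aggregate_get_down for the downward pass and the new aggregate_get_nop for
the upward pass, so only the aggregation is redone; re-setting the shares
(which the hunk-1 context shows aggregate_get_up doing via
aggregate_group_set_shares) is presumably skipped on purpose. A rough,
hypothetical sketch of the walker's shape follows; the struct task_group
field names and the helper name are assumptions for illustration, not the
real aggregate_walk_tree().]

#include <linux/list.h>

/* Hypothetical, simplified tree walk: apply `down` to each group on the
 * way down and `up` on the way back up. */
typedef void (*tg_visitor)(struct task_group *tg, int cpu,
			   struct sched_domain *sd);

static void walk_tree(struct task_group *tg, tg_visitor down,
		      tg_visitor up, int cpu, struct sched_domain *sd)
{
	struct task_group *child;

	down(tg, cpu, sd);	/* e.g. aggregate_get_down: refresh stats */
	list_for_each_entry(child, &tg->children, siblings)
		walk_tree(child, down, up, cpu, sd);
	up(tg, cpu, sd);	/* aggregate_get_nop here: nothing to redo */
}

[Re-running only the downward pass under the locks keeps the re-aggregation
cheap: it refreshes the weights find_idlest_group() is about to compare,
without repeating the shares distribution.]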