
Commit 9437178f authored by Paul Turner, committed by Ingo Molnar

sched: Update tg->shares after cpu.shares write



Formerly sched_group_set_shares would force a rebalance by overflowing domain
share sums.  Now that per-cpu averages are maintained we can set the true value
by issuing an update_cfs_shares() following a tg->shares update.

Also initialize tg se->load to 0 for consistency since we'll now set correct
weights on enqueue.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.465521344@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d6b55918
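
For orientation only (not part of the commit): a minimal user-space sketch of the idea behind the change, assuming a simplified group structure whose fields (shares, load_avg, se_weight) are made-up stand-ins for the kernel's task_group / cfs_rq / sched_entity state. It shows a shares update being followed by a recomputation of each CPU's group-entity weight from per-CPU load that is already being tracked, which is roughly the role update_cfs_shares() plays after this patch; the real kernel path additionally takes rq->lock and walks the entity hierarchy with for_each_sched_entity().

/* Illustrative sketch only -- not kernel code.  All names are simplified
 * stand-ins for the kernel's task_group / cfs_rq / sched_entity state. */
#include <stdio.h>

#define NR_CPUS 4

struct group {
	unsigned long shares;              /* the group's cpu.shares value     */
	unsigned long load_avg[NR_CPUS];   /* maintained per-cpu load averages */
	unsigned long se_weight[NR_CPUS];  /* effective per-cpu entity weight  */
};

/* Rough analogue of update_cfs_shares(): derive one cpu's entity weight
 * from the group's shares, scaled by that cpu's fraction of group load. */
static void update_group_weight(struct group *g, int cpu)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		total += g->load_avg[i];

	g->se_weight[cpu] = total ? g->shares * g->load_avg[cpu] / total : 0;
}

/* Rough analogue of the new sched_group_set_shares(): record the new value
 * once, then recompute each cpu from the averages that are already kept,
 * instead of forcing every cpu's weight and waiting for a rebalance. */
static void group_set_shares(struct group *g, unsigned long shares)
{
	int cpu;

	g->shares = shares;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		update_group_weight(g, cpu);
}

int main(void)
{
	struct group g = {
		.shares   = 1024,
		.load_avg = { 300, 100, 0, 600 },
	};
	int cpu;

	group_set_shares(&g, 2048);	/* e.g. a write to cpu.shares */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d weight = %lu\n", cpu, g.se_weight[cpu]);

	return 0;
}

Each CPU's weight lands at its proportional part of the new value, rather than every CPU being set to the full shares value and corrected by a later rebalance, which is what the removed set_se_shares() loop below did.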
+11 −31
@@ -7646,7 +7646,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 		se->cfs_rq = parent->my_q;
 
 	se->my_q = cfs_rq;
-	update_load_set(&se->load, tg->shares);
+	update_load_set(&se->load, 0);
 	se->parent = parent;
 }
 #endif
@@ -8274,37 +8274,12 @@ void sched_move_task(struct task_struct *tsk)
 #endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	int on_rq;
-
-	on_rq = se->on_rq;
-	if (on_rq)
-		dequeue_entity(cfs_rq, se, 0);
-
-	update_load_set(&se->load, shares);
-
-	if (on_rq)
-		enqueue_entity(cfs_rq, se, 0);
-}
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	struct rq *rq = cfs_rq->rq;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__set_se_shares(se, shares);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static DEFINE_MUTEX(shares_mutex);
 
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
@@ -8323,10 +8298,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
-		/*
-		 * force a rebalance
-		 */
-		set_se_shares(tg->se[i], shares);
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se), 0);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
 done: