
Commit d6b55918 authored by Paul Turner, committed by Ingo Molnar

sched: Allow update_cfs_load() to update global load



Refactor the global load updates from update_shares_cpu() so that
update_cfs_load() can update global load when it is more than ~10%
out of sync.

The new global_update parameter allows us to force an update, regardless of
the error factor, so that we can synchronize with update_shares().

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.377473595@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3b3d190e
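For context, the check this commit introduces in update_cfs_rq_load_contribution() only folds a cfs_rq's local load average into the task group's global load_weight when it has drifted by more than 1/8th of the last published contribution (the "~10%" the commit message refers to), unless the caller forces the fold via global_update, as update_shares_cpu() now does. The user-space C sketch below is illustrative only, not kernel code: the names cfs_load_model and fold_contribution are invented for the example, and plain arithmetic stands in for div64_u64() and atomic_add().

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical user-space stand-in for the per-cfs_rq fields the patch touches. */
struct cfs_load_model {
	unsigned long long load_avg;	/* accumulated weighted load */
	unsigned long long load_period;	/* accumulated period */
	long load_contribution;		/* last value folded into the group load */
};

/*
 * Models the check added in update_cfs_rq_load_contribution(): fold the
 * delta into the global (task-group) load either when forced via
 * global_update, or when the local average has drifted by more than
 * ~1/8th of the last published contribution.
 */
static long fold_contribution(struct cfs_load_model *m, long *global_load,
			      int global_update)
{
	long avg = (long)(m->load_avg / (m->load_period + 1));
	long delta = avg - m->load_contribution;

	if (global_update || labs(delta) > m->load_contribution / 8) {
		*global_load += delta;	/* atomic_add(delta, &tg->load_weight) in the kernel */
		m->load_contribution += delta;
		return delta;
	}
	return 0;
}

int main(void)
{
	struct cfs_load_model m = { .load_avg = 1050, .load_period = 0,
				    .load_contribution = 1000 };
	long global_load = 1000;
	long folded;

	/* 5% drift: below the error band, nothing is published... */
	folded = fold_contribution(&m, &global_load, 0);
	printf("folded %ld, global load now %ld\n", folded, global_load);

	/* ...unless the update is forced, as update_shares_cpu() now does. */
	folded = fold_contribution(&m, &global_load, 1);
	printf("folded %ld, global load now %ld\n", folded, global_load);
	return 0;
}

The error band presumably exists to avoid touching the shared tg->load_weight atomic on every local update, while the global_update override lets update_shares() synchronize against an exact value when it needs one.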
kernel/sched_fair.c  +29 −15
@@ -539,7 +539,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
-static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
 static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
 
 /*
@@ -565,7 +565,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->load_unacc_exec_time += delta_exec;
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 #endif
@@ -704,7 +704,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
+					    int global_update)
+{
+	struct task_group *tg = cfs_rq->tg;
+	long load_avg;
+
+	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+	load_avg -= cfs_rq->load_contribution;
+
+	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
+		atomic_add(load_avg, &tg->load_weight);
+		cfs_rq->load_contribution += load_avg;
+	}
+}
+
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
@@ -731,6 +746,11 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
 		cfs_rq->load_avg += delta * load;
 	}
 
+	/* consider updating load contribution on each fold or truncate */
+	if (global_update || cfs_rq->load_period > period
+	    || !cfs_rq->load_period)
+		update_cfs_rq_load_contribution(cfs_rq, global_update);
+
 	while (cfs_rq->load_period > period) {
 		/*
 		 * Inline assembly required to prevent the compiler
@@ -790,7 +810,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
@@ -920,7 +940,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -981,7 +1001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq, 0);
@@ -1216,7 +1236,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -1246,7 +1266,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -2052,7 +2072,6 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
 	struct rq *rq;
-	long load_avg;
 
 	if (!tg->se[cpu])
 		return 0;
@@ -2063,12 +2082,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	update_rq_clock(rq);
-	update_cfs_load(cfs_rq);
-
-	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-	load_avg -= cfs_rq->load_contribution;
-	atomic_add(load_avg, &tg->load_weight);
-	cfs_rq->load_contribution += load_avg;
+	update_cfs_load(cfs_rq, 1);
 
 	/*
 	 * We need to update shares after updating tg->load_weight in