
Commit c566e8e9 authored by Paul Turner, committed by Ingo Molnar

sched: Aggregate total task_group load



Maintain a global running sum of the average load seen on each cfs_rq belonging
to each task group so that it may be used in calculating an appropriate
shares:weight distribution.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.792901086@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent aff3e498
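
For readers skimming the diff below: each cfs_rq keeps a tg_load_contrib recording the value it last folded into the group-wide tg->load_avg, and it only publishes the delta once that delta drifts past 1/8 of the published value (or when an update is forced), which bounds traffic on the shared atomic. Here is a minimal userspace sketch of that scheme; the struct and function names are illustrative, not the kernel's:

/*
 * Userspace sketch (not kernel code) of the aggregation scheme this
 * patch introduces: each per-cpu queue publishes the delta between its
 * current load average and its last-published contribution into a
 * global per-task_group sum, but only when the delta exceeds 1/8 of
 * the previously published value, or when an update is forced.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tg { atomic_llong load_avg; };		/* global running sum   */
struct crq {					/* one per cpu          */
	long long runnable_load_avg;
	long long blocked_load_avg;
	long long tg_load_contrib;		/* last published value */
	struct tg *tg;
};

static void update_tg_load_contrib(struct crq *crq, int force_update)
{
	long long contrib = crq->runnable_load_avg + crq->blocked_load_avg;
	long long delta = contrib - crq->tg_load_contrib;

	if (force_update || llabs(delta) > crq->tg_load_contrib / 8) {
		atomic_fetch_add(&crq->tg->load_avg, delta);
		crq->tg_load_contrib += delta;
	}
}

int main(void)
{
	struct tg tg = { .load_avg = 0 };
	struct crq c0 = { .tg = &tg }, c1 = { .tg = &tg };

	c0.runnable_load_avg = 1024;	/* large delta: published       */
	update_tg_load_contrib(&c0, 0);
	c1.runnable_load_avg = 512;	/* large delta: published       */
	update_tg_load_contrib(&c1, 0);
	c0.runnable_load_avg = 1030;	/* drift of 6 < 1024/8: skipped */
	update_tg_load_contrib(&c0, 0);

	printf("tg->load_avg = %lld\n", atomic_load(&tg.load_avg));
	return 0;
}

In this sketch the third update is absorbed locally, since the 6-unit drift is under the 1024/8 threshold, so tg.load_avg reports 1536 rather than 1542; the sum is a bounded approximation in exchange for far fewer global atomic operations.
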
kernel/sched/debug.c +4 −0
@@ -230,6 +230,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
+			atomic64_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
+			cfs_rq->tg_load_contrib);
 #endif
 
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
kernel/sched/fair.c +22 −0
@@ -1102,6 +1102,26 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
 	return decays;
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+						 int force_update)
+{
+	struct task_group *tg = cfs_rq->tg;
+	s64 tg_contrib;
+
+	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
+	tg_contrib -= cfs_rq->tg_load_contrib;
+
+	if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
+		atomic64_add(tg_contrib, &tg->load_avg);
+		cfs_rq->tg_load_contrib += tg_contrib;
+	}
+}
+#else
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+						 int force_update) {}
+#endif
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
@@ -1172,6 +1192,8 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 		atomic64_add(decays, &cfs_rq->decay_counter);
 		cfs_rq->last_decay = now;
 	}
+
+	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
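
The commit message says the aggregate "may be used in calculating an appropriate shares:weight distribution"; that consumer is not part of this patch. As a rough illustration of the intended use, continuing the userspace sketch above with a hypothetical helper, a per-cpu share of the group's weight could be derived from each queue's fraction of the total:

/*
 * Illustrative only: derive a per-cpu share of the group's weight as
 * roughly tg_shares * contrib_i / total.  The kernel's actual
 * computation arrives in a later patch, not this one.
 */
static long long cpu_shares(long long tg_shares, long long cpu_contrib,
			    long long tg_load_avg)
{
	if (tg_load_avg <= 0)
		return tg_shares;	/* no load: default to full weight */
	return tg_shares * cpu_contrib / tg_load_avg;
}
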
kernel/sched/sched.h +4 −0
@@ -112,6 +112,7 @@ struct task_group {
 	unsigned long shares;
 
 	atomic_t load_weight;
+	atomic64_t load_avg;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -232,6 +233,9 @@ struct cfs_rq {
 	u64 runnable_load_avg, blocked_load_avg;
 	atomic64_t decay_counter, removed_load;
 	u64 last_decay;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	u64 tg_load_contrib;
+#endif
 #endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */