
Commit 0c1dc6b2 authored by Morten Rasmussen, committed by Ingo Molnar

sched: Make sched entity usage tracking scale-invariant



Apply frequency scale-invariance correction factor to usage tracking.

Each segment of the running_avg_sum geometric series is now scaled by the
current frequency so the utilization_avg_contrib of each entity will be
invariant with frequency scaling.
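
To make the arithmetic concrete, here is a minimal standalone sketch (not
kernel code; it only borrows SCHED_CAPACITY_SHIFT and assumes
arch_scale_freq_capacity() returns a frequency-proportional factor that is
1024 at max frequency): the same wall-clock delta contributes half as much
to running_avg_sum when the CPU runs at half its max frequency.

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* mirrors the kernel constant */

int main(void)
{
	unsigned long delta = 1024;		/* wall-clock time spent running */
	unsigned long scale_half = 512;		/* assumed factor at 50% of fmax */
	unsigned long scale_full = 1024;	/* factor at fmax */

	/* The patched accumulation: segment * scale_freq >> SCHED_CAPACITY_SHIFT */
	printf("contrib at 50%% fmax: %lu\n", delta * scale_half >> SCHED_CAPACITY_SHIFT);
	printf("contrib at fmax:     %lu\n", delta * scale_full >> SCHED_CAPACITY_SHIFT);
	return 0;
}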

As a result, utilization_load_avg, which is the sum of utilization_avg_contrib,
becomes invariant too. So the usage level returned by get_cpu_usage() stays
relative to the max frequency, as does the cpu_capacity it is compared against.
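
The invariance is what makes that comparison well-defined; a hypothetical
sketch (illustrative names only, not the kernel's get_cpu_usage()):

static unsigned long usage_vs_capacity(unsigned long usage,
				       unsigned long capacity)
{
	/* usage (utilization_load_avg) and capacity (cpu_capacity) are both
	 * relative to the max frequency, so usage can be clamped against
	 * capacity no matter which frequency the CPU currently runs at. */
	return usage < capacity ? usage : capacity;
}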

Then, we want to keep the load tracking values in a 32-bit type, which implies
that the max value of {runnable|running}_avg_sum must be lower than
2^32/88761 = 48388 (88761 is the max weight of a task). As LOAD_AVG_MAX = 47742,
arch_scale_freq_capacity() must return a value less than
(48388/47742) << SCHED_CAPACITY_SHIFT = 1037 (SCHED_CAPACITY_SCALE = 1024).
So we define the range as [0..SCHED_CAPACITY_SCALE] in order to avoid overflow.
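
That bound can be checked with a few lines of standalone C (a sketch; the
constants mirror the changelog's numbers, not kernel headers):

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define LOAD_AVG_MAX		47742	/* max sum of the geometric series */
#define MAX_TASK_WEIGHT		88761	/* max weight of a task */

int main(void)
{
	/* {runnable|running}_avg_sum must stay below 2^32 / 88761 ... */
	uint64_t sum_limit = (1ULL << 32) / MAX_TASK_WEIGHT;

	/* ... so the scale factor must stay below (48388/47742) << 10. */
	uint64_t scale_limit = (sum_limit << SCHED_CAPACITY_SHIFT) / LOAD_AVG_MAX;

	printf("sum limit:   %llu\n", (unsigned long long)sum_limit);   /* 48388 */
	printf("scale limit: %llu\n", (unsigned long long)scale_limit); /* 1037 */
	return 0;
}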

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Morten.Rasmussen@arm.com
Cc: dietmar.eggemann@arm.com
Cc: efault@gmx.de
Cc: kamalesh@linux.vnet.ibm.com
Cc: linaro-kernel@lists.linaro.org
Cc: nicolas.pitre@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: riel@redhat.com
Link: http://lkml.kernel.org/r/1425455186-13451-1-git-send-email-vincent.guittot@linaro.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a8faa8f5
kernel/sched/fair.c +14 −7
@@ -2484,6 +2484,8 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -2512,7 +2514,7 @@ static u32 __compute_runnable_contrib(u64 n)
  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int __update_entity_runnable_avg(u64 now,
+static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 							struct sched_avg *sa,
 							int runnable,
 							int running)
@@ -2520,6 +2522,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	u64 delta, periods;
 	u32 runnable_contrib;
 	int delta_w, decayed = 0;
+	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
 
 	delta = now - sa->last_runnable_update;
 	/*
@@ -2555,7 +2558,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		if (runnable)
 			sa->runnable_avg_sum += delta_w;
 		if (running)
-			sa->running_avg_sum += delta_w;
+			sa->running_avg_sum += delta_w * scale_freq
+				>> SCHED_CAPACITY_SHIFT;
 		sa->avg_period += delta_w;
 
 		delta -= delta_w;
@@ -2576,7 +2580,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		if (runnable)
 			sa->runnable_avg_sum += runnable_contrib;
 		if (running)
-			sa->running_avg_sum += runnable_contrib;
+			sa->running_avg_sum += runnable_contrib * scale_freq
+				>> SCHED_CAPACITY_SHIFT;
 		sa->avg_period += runnable_contrib;
 	}
 
@@ -2584,7 +2589,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	if (runnable)
 		sa->runnable_avg_sum += delta;
 	if (running)
-		sa->running_avg_sum += delta;
+		sa->running_avg_sum += delta * scale_freq
+			>> SCHED_CAPACITY_SHIFT;
 	sa->avg_period += delta;
 
 	return decayed;
@@ -2692,8 +2698,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
-	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable,
-			runnable);
+	__update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
+			runnable, runnable);
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -2771,6 +2777,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	long contrib_delta, utilization_delta;
+	int cpu = cpu_of(rq_of(cfs_rq));
 	u64 now;
 
 	/*
@@ -2782,7 +2789,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 	else
 		now = cfs_rq_clock_task(group_cfs_rq(se));
 
-	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
+	if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
 					cfs_rq->curr == se))
 		return;
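
Read together, the hunks reduce to the following condensed, standalone sketch
of the patched accumulation step (it elides the decay and period-rollover
handling that the real __update_entity_runnable_avg() performs):

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10	/* mirrors the kernel constant */

struct sched_avg_sketch {
	uint32_t runnable_avg_sum;	/* time runnable: left frequency-dependent */
	uint32_t running_avg_sum;	/* time running: now frequency-invariant */
	uint32_t avg_period;
};

static void accumulate_sketch(struct sched_avg_sketch *sa, uint32_t delta,
			      int runnable, int running,
			      unsigned long scale_freq)
{
	if (runnable)
		sa->runnable_avg_sum += delta;
	if (running)
		/* Scale each segment by the current frequency so the sum
		 * reflects work done relative to running at max capacity. */
		sa->running_avg_sum += delta * scale_freq
			>> SCHED_CAPACITY_SHIFT;
	sa->avg_period += delta;
}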