Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1936c53c authored by Vincent Guittot's avatar Vincent Guittot Committed by Ingo Molnar
Browse files

sched/fair: Reduce the periodic update duration



Instead of using the cfs_rq_is_decayed() which monitors all *_avg
and *_sum, we create a cfs_rq_has_blocked() which only takes care of
util_avg and load_avg. We are only interested in these two values, which are
decaying faster than the *_sum so we can stop the periodic update earlier.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: brendan.jackman@arm.com
Cc: dietmar.eggemann@arm.com
Cc: morten.rasmussen@foss.arm.com
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1518517879-2280-3-git-send-email-vincent.guittot@linaro.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f643ea22
Loading
Loading
Loading
Loading
+17 −4
Original line number Diff line number Diff line
@@ -7424,6 +7424,19 @@ static void attach_tasks(struct lb_env *env)
	rq_unlock(env->dst_rq, &rf);
}

/*
 * Return true if @cfs_rq still carries blocked load or utilization.
 *
 * Unlike cfs_rq_is_decayed(), this checks only avg.load_avg and
 * avg.util_avg — the two signals that decay faster than the *_sum
 * values — so the periodic blocked-load update can stop earlier.
 */
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
{
	return cfs_rq->avg.load_avg || cfs_rq->avg.util_avg;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load.weight)
@@ -7441,8 +7454,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
	return true;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static void update_blocked_averages(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
@@ -7478,7 +7489,9 @@ static void update_blocked_averages(int cpu)
		 */
		if (cfs_rq_is_decayed(cfs_rq))
			list_del_leaf_cfs_rq(cfs_rq);
		else

		/* Don't need periodic decay once load/util_avg are null */
		if (cfs_rq_has_blocked(cfs_rq))
			done = false;
	}

@@ -7548,7 +7561,7 @@ static inline void update_blocked_averages(int cpu)
	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
#ifdef CONFIG_NO_HZ_COMMON
	rq->last_blocked_load_update_tick = jiffies;
	if (cfs_rq_is_decayed(cfs_rq))
	if (!cfs_rq_has_blocked(cfs_rq))
		rq->has_blocked_load = 0;
#endif
	rq_unlock_irqrestore(rq, &rf);