
Commit 3a123bbb authored by Viresh Kumar, committed by Ingo Molnar

sched/fair: Drop always true parameter of update_cfs_rq_load_avg()

update_freq is always true and there is no need to pass it to
update_cfs_rq_load_avg(). Remove it.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: linaro-kernel@lists.linaro.org
Link: http://lkml.kernel.org/r/2d28d295f3f591ede7e931462bce1bda5aaa4896.1495603536.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9674f5ca
kernel/sched/fair.c  +7 −8
@@ -806,7 +806,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			/*
 			 * For !fair tasks do:
 			 *
-			update_cfs_rq_load_avg(now, cfs_rq, false);
+			update_cfs_rq_load_avg(now, cfs_rq);
 			attach_entity_load_avg(cfs_rq, se);
 			switched_from_fair(rq, p);
 			 *
@@ -3320,7 +3320,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
- * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3334,7 +3333,7 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
 	int decayed, removed_load = 0, removed_util = 0;
@@ -3362,7 +3361,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (update_freq && (decayed || removed_util))
+	if (decayed || removed_util)
 		cfs_rq_util_change(cfs_rq);
 
 	return decayed || removed_load;
@@ -3390,7 +3389,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cpu, cfs_rq, se);
 
-	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
 	if (decayed && (flags & UPDATE_TG))
@@ -3534,7 +3533,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	return 0;
 }
@@ -6919,7 +6918,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -6992,7 +6991,7 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
 	rq_unlock_irqrestore(rq, &rf);
 }
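
The pattern the diff applies is worth spelling out: once every live caller passes the same constant for a flag parameter, the flag carries no information and can be folded into the callee. Below is a minimal standalone sketch of the before/after shape; the names load_avg_update_old/load_avg_update_new and the puts() stand-in are hypothetical, not the kernel's functions.

#include <stdbool.h>
#include <stdio.h>

/* Before: every caller passes update_freq == true, so the flag is dead. */
static int load_avg_update_old(int decayed, int removed_util, bool update_freq)
{
	if (update_freq && (decayed || removed_util))
		puts("util changed");	/* stand-in for cfs_rq_util_change() */
	return decayed;
}

/* After: the always-true flag is dropped and the condition simplifies. */
static int load_avg_update_new(int decayed, int removed_util)
{
	if (decayed || removed_util)
		puts("util changed");
	return decayed;
}

int main(void)
{
	/* The two variants behave identically for every real call site. */
	return load_avg_update_old(1, 0, true) == load_avg_update_new(1, 0) ? 0 : 1;
}

Note that the only `false` call site touched by the diff sits inside a comment in post_init_entity_util_avg(), so dropping the parameter changes no runtime behaviour; it only shrinks the signature and the condition guarding cfs_rq_util_change().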