Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 904c79c4 authored by Quentin Perret
Browse files

sched: compute task utilisation with WALT consistently



Using WALT, the utilisation of a task is computed with a resolution
scaling factor that has been used inconsistently in the code with either
hardcoded values or macros (NICE_0_LOAD_SHIFT in this case). Changes in
these macros (as the 32 to 64 bits resolution shift of 2159197d)
happened to break the utilisation calculation wherever they have been
used whilst results remained correct in other places. This commit fixes
this issue by using SCHED_CAPACITY_SCALE as resolution scaling factor
consistently.

Change-Id: Ic5418f8a5dfc455a22bafbebb4142b4665b61c6f
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent a93e3124
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -654,7 +654,7 @@ TRACE_EVENT(sched_load_avg_task,
		__entry->util_avg_pelt  = avg->util_avg;
		__entry->util_avg_walt  = 0;
#ifdef CONFIG_SCHED_WALT
		__entry->util_avg_walt = (((unsigned long)((struct ravg*)_ravg)->demand) << NICE_0_LOAD_SHIFT);
		__entry->util_avg_walt = (((unsigned long)((struct ravg*)_ravg)->demand) << SCHED_CAPACITY_SHIFT);
		do_div(__entry->util_avg_walt, walt_ravg_window);
		if (!walt_disabled && sysctl_sched_use_walt_task_util)
			__entry->util_avg = __entry->util_avg_walt;
@@ -700,7 +700,7 @@ TRACE_EVENT(sched_load_avg_cpu,
		__entry->util_avg_walt	= 0;
#ifdef CONFIG_SCHED_WALT
		__entry->util_avg_walt	=
				cpu_rq(cpu)->prev_runnable_sum << NICE_0_LOAD_SHIFT;
			cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		do_div(__entry->util_avg_walt, walt_ravg_window);
		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
			__entry->util_avg		= __entry->util_avg_walt;
@@ -1058,7 +1058,7 @@ TRACE_EVENT(walt_update_task_ravg,
		__entry->irqtime        = irqtime;
		__entry->cs             = rq->curr_runnable_sum;
		__entry->ps             = rq->prev_runnable_sum;
		__entry->util           = rq->prev_runnable_sum << NICE_0_LOAD_SHIFT;
		__entry->util           = rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		do_div(__entry->util, walt_ravg_window);
		__entry->curr_window	= p->ravg.curr_window;
		__entry->prev_window	= p->ravg.prev_window;
@@ -1108,7 +1108,7 @@ TRACE_EVENT(walt_update_history,
		__entry->samples        = samples;
		__entry->evt            = evt;
		__entry->demand         = p->ravg.demand;
		__entry->walt_avg	= (__entry->demand << 10);
		__entry->walt_avg	= (__entry->demand << SCHED_CAPACITY_SHIFT);
		__entry->walt_avg	= div_u64(__entry->walt_avg,
						  walt_ravg_window);
		__entry->pelt_avg	= p->se.avg.util_avg;
+1 −1
Original line number Diff line number Diff line
@@ -5998,7 +5998,7 @@ static inline unsigned long task_util(struct task_struct *p)
#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
		unsigned long demand = p->ravg.demand;
		return (demand << 10) / walt_ravg_window;
		return (demand << SCHED_CAPACITY_SHIFT) / walt_ravg_window;
	}
#endif
	return p->se.avg.util_avg;