Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b34bba6 authored by Joonwoo Park's avatar Joonwoo Park Committed by Todd Kjos
Browse files

sched: EAS/WALT: use cr_avg instead of prev_runnable_sum



WALT accounts for two major statistics: CPU load and cumulative task
demand.

The CPU load, which is an account of each CPU's accumulated absolute
execution time, is used for CPU frequency guidance.  The cumulative
task demand, on the other hand, is each CPU's instantaneous load,
reflecting the CPU's load at a given time, and is used for task
placement decisions.

Use cumulative tasks demand for cpu_util() for task placement and
introduce cpu_util_freq() for frequency guidance.

Change-Id: Id928f01dbc8cb2a617cdadc584c1f658022565c5
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
(cherry picked from commit ee4cebd75ed7b77132c39c0093923f9ff1bcafaa)
[removed schedfreq dependency]
Signed-off-by: default avatarQuentin Perret <quentin.perret@arm.com>
parent 8b1a1ce1
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -4789,7 +4789,7 @@ static inline void hrtick_update(struct rq *rq)
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
#define boosted_cpu_util(cpu) cpu_util(cpu)
#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif

/*
@@ -6110,7 +6110,7 @@ schedtune_task_margin(struct task_struct *task)
unsigned long
boosted_cpu_util(int cpu)
{
	unsigned long util = cpu_util(cpu);
	unsigned long util = cpu_util_freq(cpu);
	long margin = schedtune_cpu_margin(util, cpu);

	trace_sched_boost_cpu(cpu, util, margin);
+15 −1
Original line number Diff line number Diff line
@@ -1639,7 +1639,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_CAPACITY_SHIFT;
		util = div_u64(util, walt_ravg_window);
	}
#endif
@@ -1655,6 +1655,20 @@ static inline unsigned long cpu_util(int cpu)
	return __cpu_util(cpu, 0);
}

/*
 * cpu_util_freq - CPU utilization estimate used for frequency guidance.
 *
 * Default estimate is the PELT-tracked cfs util_avg of @cpu's runqueue.
 * When WALT is compiled in and enabled, it is replaced by the CPU's
 * prev_runnable_sum scaled into SCHED_CAPACITY units: the sum is shifted
 * by SCHED_CAPACITY_SHIFT and then divided (in place, via do_div()) by
 * the WALT accounting window size.
 *
 * Note this intentionally differs from cpu_util(): cpu_util() reports
 * WALT's cumulative_runnable_avg for task placement, while frequency
 * selection keeps using prev_runnable_sum (see commit message above).
 *
 * Returns the utilization clamped to the CPU's original capacity.
 */
static inline unsigned long cpu_util_freq(int cpu)
{
	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
	unsigned long capacity = capacity_orig_of(cpu);

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		do_div(util, walt_ravg_window);
	}
#endif
	/* Utilization cannot exceed the CPU's capacity. */
	return (util >= capacity) ? capacity : util;
}

#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)