Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4266ccd1 authored by Joonwoo Park
Browse files

sched: EAS/WALT: use cr_avg instead of prev_runnable_sum



WALT accounts for two major statistics: CPU load and cumulative task
demand.

The CPU load, which is an accounting of each CPU's accumulated absolute
execution time, is for CPU frequency guidance, whereas the cumulative
task demand, which is each CPU's instantaneous load reflecting the
CPU's load at a given time, is for task placement decisions.

Use cumulative tasks demand for cpu_util() for task placement and
introduce cpu_util_freq() for frequency guidance.

Change-Id: Id928f01dbc8cb2a617cdadc584c1f658022565c5
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 0a1621b5
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3293,7 +3293,7 @@ static void sched_freq_tick_pelt(int cpu)
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
	unsigned long cpu_utilization = cpu_util(cpu);
	unsigned long cpu_utilization = cpu_util_freq(cpu, NULL);
	unsigned long capacity_curr = capacity_curr_of(cpu);

	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+1 −1
Original line number Diff line number Diff line
@@ -6005,7 +6005,7 @@ schedtune_task_margin(struct task_struct *task)
static inline unsigned long
boosted_cpu_util(int cpu)
{
	unsigned long util = cpu_util(cpu);
	unsigned long util = cpu_util_freq(cpu, NULL);
	long margin = schedtune_cpu_margin(util, cpu);

	trace_sched_boost_cpu(cpu, util, margin);
+12 −6
Original line number Diff line number Diff line
@@ -1739,7 +1739,8 @@ static inline unsigned long __cpu_util(int cpu, int delta)

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		util = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg <<
		       SCHED_CAPACITY_SHIFT;
		do_div(util, sched_ravg_window);
	}
#endif
@@ -1764,14 +1765,19 @@ struct sched_walt_cpu_load {
static inline unsigned long
cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
{
	unsigned long util = cpu_util(cpu);
	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
	unsigned long capacity = capacity_orig_of(cpu);

#ifdef CONFIG_SCHED_WALT
	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		do_div(util, sched_ravg_window);

		if (walt_load)
			walt_load->prev_window_util = util;
	}
#endif

	return util;
	return (util >= capacity) ? capacity : util;
}
#endif