Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f3e45428 authored by Quentin Perret
Browse files

FROMLIST: sched/cpufreq: Prepare schedutil for Energy Aware Scheduling



Schedutil requests frequency by aggregating utilization signals from
the scheduler (CFS, RT, DL, IRQ) and applying a 25% margin on top of
them. Since Energy Aware Scheduling (EAS) needs to be able to predict
the frequency requests, it needs to forecast the decisions made by the
governor.

In order to prepare the introduction of EAS, introduce
schedutil_freq_util() to centralize the aforementioned signal
aggregation and make it available to both schedutil and EAS. Since
frequency selection and energy estimation still need to deal with RT and
DL signals slightly differently, schedutil_freq_util() is called with a
different 'type' parameter in those two contexts, and returns an
aggregated utilization signal accordingly. While at it, introduce the
map_util_freq() function which is designed to make schedutil's 25%
margin usable easily for both sugov and EAS.

As EAS will be able to predict schedutil's frequency requests more
accurately than any other governor by design, it'd be sensible to make
sure EAS cannot be used without schedutil. This will be done later, once
EAS has actually been introduced.

Change-Id: Idbeeb00926045507b73f9cba37630b38ae0816c0
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
Message-Id: <20181016101513.26919-3-quentin.perret@arm.com>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
parent 30f6e1b5
Loading
Loading
Loading
Loading
+6 −0
Original line number Original line Diff line number Diff line
@@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
                       void (*func)(struct update_util_data *data, u64 time,
                       void (*func)(struct update_util_data *data, u64 time,
				    unsigned int flags));
				    unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
void cpufreq_remove_update_util_hook(int cpu);

/*
 * Map a utilization value onto a frequency request, applying schedutil's
 * 25% headroom: next_freq = 1.25 * freq * util / cap.  Shared by sugov
 * and EAS so that energy estimation can predict frequency selection.
 */
static inline unsigned long map_util_freq(unsigned long util,
					unsigned long freq, unsigned long cap)
{
	unsigned long boosted = freq + (freq >> 2);

	return boosted * util / cap;
}
#endif /* CONFIG_CPU_FREQ */
#endif /* CONFIG_CPU_FREQ */


#endif /* _LINUX_SCHED_CPUFREQ_H */
#endif /* _LINUX_SCHED_CPUFREQ_H */
+61 −31
Original line number Original line Diff line number Diff line
@@ -13,6 +13,7 @@


#include "sched.h"
#include "sched.h"


#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
#include <trace/events/power.h>


struct sugov_tunables {
struct sugov_tunables {
@@ -167,7 +168,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
	unsigned int freq = arch_scale_freq_invariant() ?
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;
				policy->cpuinfo.max_freq : policy->cur;


	freq = (freq + (freq >> 2)) * util / max;
	freq = map_util_freq(util, freq, max);


	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;
		return sg_policy->next_freq;
@@ -197,15 +198,13 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 * based on the task model parameters and gives the minimal utilization
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 * required to meet deadlines.
 */
 */
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
				  unsigned long max, enum schedutil_type type)
{
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	struct rq *rq = cpu_rq(cpu);
	unsigned long util, irq, max;
	unsigned long util, irq;


	sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
	if (type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt))
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	if (rt_rq_is_runnable(&rq->rt))
		return max;
		return max;


	/*
	/*
@@ -223,20 +222,33 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
	 * utilization (PELT windows are synchronized) we can directly add them
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 * to obtain the CPU's actual utilization.
	 */
	 */
	util = cpu_util_cfs(rq);
	util = util_cfs;
	util += cpu_util_rt(rq);
	util += cpu_util_rt(rq);


	if (type == FREQUENCY_UTIL) {
		/*
		/*
	 * We do not make cpu_util_dl() a permanent part of this sum because we
		 * For frequency selection we do not make cpu_util_dl() a
	 * want to use cpu_bw_dl() later on, but we need to check if the
		 * permanent part of this sum because we want to use
	 * CFS+RT+DL sum is saturated (ie. no idle time) such that we select
		 * cpu_bw_dl() later on, but we need to check if the
	 * f_max when there is no idle time.
		 * CFS+RT+DL sum is saturated (ie. no idle time) such
		 * that we select f_max when there is no idle time.
		 *
		 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
		 * NOTE: numerical errors or stop class might cause us
	 * saturation when we should -- something for later.
		 * to not quite hit saturation when we should --
		 * something for later.
		 */
		 */

		if ((util + cpu_util_dl(rq)) >= max)
		if ((util + cpu_util_dl(rq)) >= max)
			return max;
			return max;
	} else {
		/*
		 * OTOH, for energy computation we need the estimated
		 * running time, so include util_dl and ignore dl_bw.
		 */
		util += cpu_util_dl(rq);
		if (util >= max)
			return max;
	}


	/*
	/*
	 * There is still idle time; further improve the number by using the
	 * There is still idle time; further improve the number by using the
@@ -250,17 +262,35 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
	util = scale_irq_capacity(util, irq, max);
	util = scale_irq_capacity(util, irq, max);
	util += irq;
	util += irq;


	if (type == FREQUENCY_UTIL) {
		/*
		/*
	 * Bandwidth required by DEADLINE must always be granted while, for
		 * Bandwidth required by DEADLINE must always be granted
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
		 * while, for FAIR and RT, we use blocked utilization of
	 * to gracefully reduce the frequency when no tasks show up for longer
		 * IDLE CPUs as a mechanism to gracefully reduce the
	 * periods of time.
		 * frequency when no tasks show up for longer periods of
		 * time.
		 *
		 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
		 * Ideally we would like to set bw_dl as min/guaranteed
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
		 * freq and util + bw_dl as requested freq. However,
	 * an interface. So, we only do the latter for now.
		 * cpufreq is not yet ready for such an interface. So,
		 * we only do the latter for now.
		 */
		 */
	return min(max, util + sg_cpu->bw_dl);
		util += cpu_bw_dl(rq);
	}

	return min(max, util);
}

/*
 * Compute the frequency-selection utilization for sg_cpu's CPU, caching
 * the CPU capacity and DL bandwidth on sg_cpu as a side effect.
 */
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long cfs_util = cpu_util_cfs(rq);
	unsigned long cap = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);

	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->max = cap;

	return schedutil_freq_util(sg_cpu->cpu, cfs_util, cap, FREQUENCY_UTIL);
}
}


/**
/**
+30 −0
Original line number Original line Diff line number Diff line
@@ -2176,6 +2176,31 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif
#endif


#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
/**
 * enum schedutil_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency
 * @ENERGY_UTIL:	Utilization used during energy calculation
 *
 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
 * need to be aggregated differently depending on the usage made of them. This
 * enum is used within schedutil_freq_util() to differentiate the types of
 * utilization expected by the callers, and adjust the aggregation accordingly.
 */
enum schedutil_type {
	FREQUENCY_UTIL,
	ENERGY_UTIL,
};

unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
			          unsigned long max, enum schedutil_type type);

/*
 * Aggregate utilization for energy estimation on @cpu, using the CPU's
 * original capacity as the ceiling.
 */
static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
{
	return schedutil_freq_util(cpu, cfs,
				   arch_scale_cpu_capacity(NULL, cpu),
				   ENERGY_UTIL);
}

static inline unsigned long cpu_bw_dl(struct rq *rq)
static inline unsigned long cpu_bw_dl(struct rq *rq)
{
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
@@ -2202,6 +2227,11 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
{
{
	return READ_ONCE(rq->avg_rt.util_avg);
	return READ_ONCE(rq->avg_rt.util_avg);
}
}
#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
/*
 * Fallback when schedutil is not built: energy estimation sees only the
 * CFS utilization, unmodified.
 */
static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
{
	return cfs;
}
#endif
#endif


#ifdef HAVE_SCHED_AVG_IRQ
#ifdef HAVE_SCHED_AVG_IRQ