Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c4568725 authored by Rafael J. Wysocki, committed by Andres Oportus
Browse files

BACKPORT: cpufreq / sched: Pass flags to cpufreq_update_util()



It is useful to know the reason why cpufreq_update_util() has just
been called and that can be passed as flags to cpufreq_update_util()
and to the ->func() callback in struct update_util_data.  However,
doing that in addition to passing the util and max arguments they
already take would be clumsy, so avoid it.

Instead, use the observation that the schedutil governor is part
of the scheduler proper, so it can access scheduler data directly.
This allows the util and max arguments of cpufreq_update_util()
and the ->func() callback in struct update_util_data to be replaced
with a flags one, but schedutil has to be modified to follow.

Thus make the schedutil governor obtain the CFS utilization
information from the scheduler and use the "RT" and "DL" flags
instead of the special utilization value of ULONG_MAX to track
updates from the RT and DL sched classes.  Make it non-modular
too to avoid having to export scheduler variables to modules at
large.

Next, update all of the other users of cpufreq_update_util()
and the ->func() callback in struct update_util_data accordingly.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
(cherry picked from commit 58919e83)
parent d7439bce
Loading
Loading
Loading
Loading
+1 −4
Original line number Diff line number Diff line
@@ -245,7 +245,7 @@ config CPU_FREQ_GOV_INTERACTIVE
	  If in doubt, say N.

config CPU_FREQ_GOV_SCHEDUTIL
	tristate "'schedutil' cpufreq policy governor"
	bool "'schedutil' cpufreq policy governor"
	depends on CPU_FREQ && SMP
	select CPU_FREQ_GOV_ATTR_SET
	select IRQ_WORK
@@ -259,9 +259,6 @@ config CPU_FREQ_GOV_SCHEDUTIL
	  frequency tipping point is at utilization/capacity equal to 80% in
	  both cases.

	  To compile this driver as a module, choose M here: the module will
	  be called cpufreq_schedutil.

	  If in doubt, say N.

comment "CPU frequency scaling drivers"
+37 −30
Original line number Diff line number Diff line
@@ -12,7 +12,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/power.h>

@@ -53,6 +52,7 @@ struct sugov_cpu {
	unsigned long util;
	unsigned long max;
	u64 last_update;
	unsigned int flags;
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -144,24 +144,39 @@ static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
	return cpufreq_driver_resolve_freq(policy, freq);
}

/*
 * sugov_get_util - read the current CPU's CFS utilization directly
 * from the scheduler.
 * @util: out: CFS average utilization, clamped to the CPU's capacity.
 * @max:  out: the CPU's original capacity (rq->cpu_capacity_orig).
 *
 * Accessing this_rq() is only legitimate because schedutil is part of
 * the scheduler proper (made non-modular by this patch, per the commit
 * message above); a module could not reach these scheduler internals.
 */
static void sugov_get_util(unsigned long *util, unsigned long *max)
{
	struct rq *rq = this_rq();
	unsigned long cfs_max = rq->cpu_capacity_orig;

	/* Clamp util_avg so it never exceeds the capacity reported in *max. */
	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;
}

/*
 * NOTE(review): this span is a rendered diff hunk with the +/- markers
 * stripped — it interleaves the pre-patch lines (util/max parameters,
 * ULONG_MAX sentinel) with the post-patch lines (flags parameter,
 * SCHED_CPUFREQ_RT_DL test).  It is not valid C as displayed; the
 * comments below label which state each line belongs to.
 */
static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned long util, unsigned long max) /* pre-patch signature */
				unsigned int flags) /* post-patch signature */
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	/* pre-patch: util == ULONG_MAX was the special "RT/DL, run at max" value */
	next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
			get_next_freq(sg_cpu, util, max);
	/* post-patch: the reason is carried explicitly in @flags instead */
	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		/* CFS update: pull util/max from the scheduler, then pick a freq */
		sugov_get_util(&util, &max);
		next_f = get_next_freq(sg_cpu, util, max);
	}
	sugov_update_commit(sg_policy, time, next_f);
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
					   unsigned long util, unsigned long max)
					   unsigned long util, unsigned long max,
					   unsigned int flags)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
@@ -169,7 +184,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
	u64 last_freq_update_time = sg_policy->last_freq_update_time;
	unsigned int j;

	if (util == ULONG_MAX)
	if (flags & SCHED_CPUFREQ_RT_DL)
		return max_f;

	for_each_cpu(j, policy->cpus) {
@@ -192,10 +207,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
		if (delta_ns > TICK_NSEC)
			continue;

		j_util = j_sg_cpu->util;
		if (j_util == ULONG_MAX)
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return max_f;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
@@ -207,20 +222,24 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
}

static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned long util, unsigned long max)
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, util, max);
		next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
		sugov_update_commit(sg_policy, time, next_f);
	}

@@ -444,8 +463,9 @@ static int sugov_start(struct cpufreq_policy *policy)

		sg_cpu->sg_policy = sg_policy;
		if (policy_is_shared(policy)) {
			sg_cpu->util = ULONG_MAX;
			sg_cpu->util = 0;
			sg_cpu->max = 0;
			sg_cpu->flags = SCHED_CPUFREQ_RT;
			sg_cpu->last_update = 0;
			sg_cpu->cached_raw_freq = 0;
			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
@@ -495,28 +515,15 @@ static struct cpufreq_governor schedutil_gov = {
	.limits = sugov_limits,
};

/*
 * Pre-patch module entry point: registers the schedutil governor with
 * the cpufreq core.  Removed by this patch (the governor becomes
 * built-in and is registered via fs_initcall() instead).
 */
static int __init sugov_module_init(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}

/*
 * Pre-patch module exit hook: unregisters the governor.  Removed by
 * this patch — a built-in governor is never unloaded, so no exit path
 * is needed.
 */
static void __exit sugov_module_exit(void)
{
	cpufreq_unregister_governor(&schedutil_gov);
}

MODULE_AUTHOR("Rafael J. Wysocki <rafael.j.wysocki@intel.com>");
MODULE_DESCRIPTION("Utilization-based CPU frequency selection");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
/*
 * Report schedutil as the system's default cpufreq governor; compiled
 * in only when CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is selected (see
 * the surrounding #ifdef).
 */
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}

fs_initcall(sugov_module_init);
#else
module_init(sugov_module_init);
#endif
module_exit(sugov_module_exit);

/*
 * Post-patch registration path: the governor is now built-in (made
 * non-modular by this patch so it can access scheduler data directly),
 * so it is registered early via fs_initcall() rather than through
 * module_init()/module_exit().
 */
static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);