
Commit 5a181ba9 authored by Shaleen Agrawal, committed by Jonathan Avila

sched: Add schedutil snapshot



This snapshot is taken from msm-4.19 as of commit 2508aa54fb9e
(sched: Improve the scheduler").

Change-Id: I193900db684515f6585371b7c58d5b9c361c2e6b
Signed-off-by: Shaleen Agrawal <shalagra@codeaurora.org>
parent 5b1b65d0
include/trace/events/power.h  +55 −0
@@ -700,6 +700,61 @@ TRACE_EVENT(memlat_dev_update,
		__entry->vote)
);

TRACE_EVENT(sugov_util_update,
	    TP_PROTO(int cpu,
		     unsigned long util, unsigned long avg_cap,
		     unsigned long max_cap, unsigned long nl, unsigned long pl,
		     unsigned int rtgb, unsigned int flags),
	    TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, rtgb, flags),
	    TP_STRUCT__entry(
		    __field(int, cpu)
		    __field(unsigned long, util)
		    __field(unsigned long, avg_cap)
		    __field(unsigned long, max_cap)
		    __field(unsigned long, nl)
		    __field(unsigned long, pl)
		    __field(unsigned int, rtgb)
		    __field(unsigned int, flags)
	    ),
	    TP_fast_assign(
		    __entry->cpu = cpu;
		    __entry->util = util;
		    __entry->avg_cap = avg_cap;
		    __entry->max_cap = max_cap;
		    __entry->nl = nl;
		    __entry->pl = pl;
		    __entry->rtgb = rtgb;
		    __entry->flags = flags;
	    ),
	    TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu rtgb=%u flags=0x%x",
		      __entry->cpu, __entry->util, __entry->avg_cap,
		      __entry->max_cap, __entry->nl,
		      __entry->pl, __entry->rtgb, __entry->flags)
);

TRACE_EVENT(sugov_next_freq,
	    TP_PROTO(unsigned int cpu, unsigned long util, unsigned long max,
		     unsigned int freq),
	    TP_ARGS(cpu, util, max, freq),
	    TP_STRUCT__entry(
		    __field(unsigned int, cpu)
		    __field(unsigned long, util)
		    __field(unsigned long, max)
		    __field(unsigned int, freq)
	    ),
	    TP_fast_assign(
		    __entry->cpu = cpu;
		    __entry->util = util;
		    __entry->max = max;
		    __entry->freq = freq;
	    ),
	    TP_printk("cpu=%u util=%lu max=%lu freq=%u",
		      __entry->cpu,
		      __entry->util,
		      __entry->max,
		      __entry->freq)
);
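
/*
 * Usage note: once this header is compiled in, both events appear under
 * the standard tracefs layout (assumed here), e.g.
 * /sys/kernel/tracing/events/power/sugov_util_update/enable, and can be
 * read back from trace_pipe to observe the governor's decisions.
 */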

#endif /* _TRACE_POWER_H */

/* This part must be outside protection */
kernel/sched/cpufreq_schedutil.c  +441 −29
@@ -12,19 +12,31 @@

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
#include <linux/sched/sysctl.h>

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
	unsigned int		hispeed_load;
	unsigned int		hispeed_freq;
	unsigned int		rtg_boost_freq;
	bool			pl;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	u64 last_ws;
	u64 curr_cycles;
	u64 last_cyc_update_time;
	unsigned long avg_cap;
	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;
	unsigned long hispeed_util;
	unsigned long rtg_boost_util;
	unsigned long max;

	raw_spinlock_t		update_lock;	/* For shared policies */
	u64			last_freq_update_time;
@@ -53,6 +65,11 @@ struct sugov_cpu {
	unsigned int		iowait_boost;
	u64			last_update;

	struct sched_walt_cpu_load walt_load;

	unsigned long util;
	unsigned int flags;

	unsigned long		bw_dl;
	unsigned long		max;

@@ -63,6 +80,8 @@ struct sugov_cpu {
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
static unsigned int stale_ns;
static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);

/************************ Governor internals ***********************/

@@ -101,6 +120,24 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static inline bool use_pelt(void)
{
#ifdef CONFIG_SCHED_WALT
	return false;
#else
	return true;
#endif
}

static inline bool conservative_pl(void)
{
#ifdef CONFIG_SCHED_WALT
	return sysctl_sched_conservative_pl;
#else
	return false;
#endif
}
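
/*
 * In a CONFIG_SCHED_WALT build use_pelt() is constant-false, so the
 * early-return guards in the WALT helpers below compile away and the
 * windowed-load paths run; in a PELT build the same guards turn those
 * helpers into no-ops and the file behaves like stock schedutil.
 */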

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
@@ -113,6 +150,62 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
	return true;
}

static unsigned long freq_to_util(struct sugov_policy *sg_policy,
				  unsigned int freq)
{
	return mult_frac(sg_policy->max, freq,
			 sg_policy->policy->cpuinfo.max_freq);
}

#define KHZ 1000
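/*
 * Unit check for sugov_track_cycles() below: prev_freq is in kHz and
 * delta_ns in ns, so delta_ns * freq / (NSEC_PER_SEC / KHZ) yields
 * cycles, e.g. 2 ms (2e6 ns) at 1,500,000 kHz -> 2e6 * 1.5e6 / 1e6 =
 * 3e6 cycles, i.e. 1.5 GHz for 2 ms.
 */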
static void sugov_track_cycles(struct sugov_policy *sg_policy,
				unsigned int prev_freq,
				u64 upto)
{
	u64 delta_ns, cycles;
	u64 next_ws = sg_policy->last_ws + sched_ravg_window;

	if (use_pelt())
		return;

	upto = min(upto, next_ws);
	/* Track cycles in current window */
	delta_ns = upto - sg_policy->last_cyc_update_time;
	delta_ns *= prev_freq;
	do_div(delta_ns, (NSEC_PER_SEC / KHZ));
	cycles = delta_ns;
	sg_policy->curr_cycles += cycles;
	sg_policy->last_cyc_update_time = upto;
}

static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
				unsigned int prev_freq)
{
	u64 last_ws = sg_policy->last_ws;
	unsigned int avg_freq;

	if (use_pelt())
		return;

	BUG_ON(curr_ws < last_ws);
	if (curr_ws <= last_ws)
		return;

	/* If we skipped some windows */
	if (curr_ws > (last_ws + sched_ravg_window)) {
		avg_freq = prev_freq;
		/* Reset tracking history */
		sg_policy->last_cyc_update_time = curr_ws;
	} else {
		sugov_track_cycles(sg_policy, prev_freq, curr_ws);
		avg_freq = sg_policy->curr_cycles;
		avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
	}
	sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
	sg_policy->curr_cycles = 0;
	sg_policy->last_ws = curr_ws;
}
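
/*
 * Example: with a 20 ms WALT window (sched_ravg_window = 2e7 ns), the
 * divisor above is 2e7 / 1e6 = 20, so 3e7 accumulated cycles give
 * avg_freq = 1,500,000 kHz, which freq_to_util() then maps into the
 * 0..sg_policy->max capacity scale.
 */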

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
@@ -122,6 +215,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	sugov_track_cycles(sg_policy, sg_policy->policy->cur, time);
	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (!next_freq)
		return;
@@ -140,12 +234,12 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

-	if (!sg_policy->work_in_progress) {
+	if (use_pelt())
		sg_policy->work_in_progress = true;
	irq_work_queue(&sg_policy->irq_work);
-	}
}
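
/*
 * Note: in the WALT case work_in_progress is deliberately never set, so
 * a later update cannot be dropped while the kthread is still running;
 * irq_work_queue() itself is a no-op if the work is already pending.
 */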

#define TARGET_LOAD 80
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
@@ -176,6 +270,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);
	trace_sugov_next_freq(policy->cpu, util, max, freq);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;
@@ -294,13 +389,21 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
-	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
#ifdef CONFIG_SCHED_WALT

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return cpu_util_freq(sg_cpu->cpu, &sg_cpu->walt_load);
#else
	unsigned long util = cpu_util_freq(sg_cpu->cpu, NULL) + cpu_util_rt(rq);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
#endif
}
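
/*
 * In the WALT branch, cpu_util_freq() returns the windowed demand for
 * the CPU and fills in the companion walt_load stats (window start ws,
 * new-task load nl, predicted load pl, rtgb_active) that the update
 * paths below read back.
 */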

/**
@@ -439,6 +542,45 @@ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
#define DEFAULT_CPU0_RTG_BOOST_FREQ 1000000
#define DEFAULT_CPU4_RTG_BOOST_FREQ 0
#define DEFAULT_CPU7_RTG_BOOST_FREQ 0
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
			      unsigned long *max)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
	bool is_rtg_boost = sg_cpu->walt_load.rtgb_active;
	unsigned long nl = sg_cpu->walt_load.nl;
	unsigned long cpu_util = sg_cpu->util;
	bool is_hiload;
	unsigned long pl = sg_cpu->walt_load.pl;

	if (use_pelt())
		return;

	if (is_rtg_boost)
		*util = max(*util, sg_policy->rtg_boost_util);

	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
					   sg_policy->tunables->hispeed_load,
					   100));

	if (is_hiload && !is_migration)
		*util = max(*util, sg_policy->hispeed_util);

	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
		*util = *max;

	if (sg_policy->tunables->pl) {
		if (conservative_pl())
			pl = mult_frac(pl, TARGET_LOAD, 100);
		*util = max(*util, pl);
	}
}
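
/*
 * Worked example: with hispeed_load = 90 and avg_cap = 800, a CPU is
 * "hiload" once its util reaches 720, at which point util is floored at
 * hispeed_util; if the new-task load nl is additionally >= 75% of the
 * CPU util (NL_RATIO), the request jumps straight to *max.
 */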

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
@@ -449,15 +591,28 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_p
		sg_policy->limits_changed = true;
}

static inline unsigned long target_util(struct sugov_policy *sg_policy,
				  unsigned int freq)
{
	unsigned long util;

	util = freq_to_util(sg_policy, freq);
	util = mult_frac(util, TARGET_LOAD, 100);
	return util;
}
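
/*
 * target_util() converts a frequency into the capacity at which that
 * frequency would run at TARGET_LOAD: e.g. max capacity 1024 and
 * cpuinfo.max_freq 2,000,000 kHz give target_util(1,000,000 kHz) =
 * 512 * 80 / 100 = 409.
 */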

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max;
+	unsigned long util, max, hs_util, boost_util;
	unsigned int next_f;
	bool busy;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
		return;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

@@ -467,11 +622,34 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
		return;

	/* Limits may have changed, don't skip frequency update */
-	busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
+	busy = use_pelt() && !sg_policy->need_freq_update &&
+		sugov_cpu_is_busy(sg_cpu);

-	util = sugov_get_util(sg_cpu);
+	sg_cpu->util = util = sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	sg_cpu->flags = flags;

	if (sg_policy->max != max) {
		sg_policy->max = max;
		hs_util = target_util(sg_policy,
				       sg_policy->tunables->hispeed_freq);
		sg_policy->hispeed_util = hs_util;

		boost_util = target_util(sg_policy,
				    sg_policy->tunables->rtg_boost_freq);
		sg_policy->rtg_boost_util = boost_util;
	}

	util = sugov_iowait_apply(sg_cpu, time, util, max);
	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
			   sg_policy->policy->cur);

	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
				sg_policy->avg_cap, max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl,
				sg_cpu->walt_load.rtgb_active, flags);

	sugov_walt_adjust(sg_cpu, &util, &max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
@@ -502,21 +680,46 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	u64 last_freq_update_time = sg_policy->last_freq_update_time;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
		if (delta_ns > stale_ns) {
			sugov_iowait_reset(j_sg_cpu, last_freq_update_time,
					   false);
			continue;
		}

-		j_util = sugov_get_util(j_sg_cpu);
		/*
		 * If the util value for all CPUs in a policy is 0, just using >
		 * will result in a max value of 1. WALT stats can later update
		 * the aggregated util value, causing get_next_freq() to compute
		 * freq = max_freq * 1.25 * (util / max) for nonzero util,
		 * leading to spurious jumps to fmax.
		 */
+		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

-		if (j_util * max > j_max * util) {
+		if (j_util * max >= j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_walt_adjust(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
@@ -527,16 +730,41 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long hs_util, boost_util;
	unsigned int next_f;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
		return;

	sg_cpu->util = sugov_get_util(sg_cpu);
	sg_cpu->flags = flags;
	raw_spin_lock(&sg_policy->update_lock);

	if (sg_policy->max != sg_cpu->max) {
		sg_policy->max = sg_cpu->max;
		hs_util = target_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
		sg_policy->hispeed_util = hs_util;

		boost_util = target_util(sg_policy,
				    sg_policy->tunables->rtg_boost_freq);
		sg_policy->rtg_boost_util = boost_util;
	}

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
			   sg_policy->policy->cur);
	ignore_dl_rate_limit(sg_cpu, sg_policy);

-	if (sugov_should_update_freq(sg_policy, time)) {
	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
				sg_cpu->max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl,
				sg_cpu->walt_load.rtgb_active, flags);

+	if (sugov_should_update_freq(sg_policy, time) &&
+	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
@@ -566,7 +794,10 @@ static void sugov_work(struct kthread_work *work)
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	if (use_pelt())
		sg_policy->work_in_progress = false;
	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
			   ktime_get_ns());
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
@@ -597,7 +828,7 @@ static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

-	return sprintf(buf, "%u\n", tunables->rate_limit_us);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->rate_limit_us);
}

static ssize_t
@@ -620,8 +851,117 @@ rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}

static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	if (kstrtouint(buf, 10, &tunables->hispeed_load))
		return -EINVAL;

	tunables->hispeed_load = min(100U, tunables->hispeed_load);

	return count;
}

static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}

static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	unsigned int val;
	struct sugov_policy *sg_policy;
	unsigned long hs_util;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->hispeed_freq = val;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		hs_util = target_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
		sg_policy->hispeed_util = hs_util;
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	}

	return count;
}
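
/*
 * Stores that feed a derived utilization (this one and
 * rtg_boost_freq_store() below) walk every policy attached to the
 * attr_set and recompute the cached util under update_lock, so the
 * update path never sees a freq/util pair that is out of sync.
 */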

static ssize_t rtg_boost_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->rtg_boost_freq);
}

static ssize_t rtg_boost_freq_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	unsigned int val;
	struct sugov_policy *sg_policy;
	unsigned long boost_util;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->rtg_boost_freq = val;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		boost_util = target_util(sg_policy,
					  sg_policy->tunables->rtg_boost_freq);
		sg_policy->rtg_boost_util = boost_util;
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	}

	return count;
}

static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
}

static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	if (kstrtobool(buf, &tunables->pl))
		return -EINVAL;

	return count;
}

static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr rtg_boost_freq = __ATTR_RW(rtg_boost_freq);
static struct governor_attr pl = __ATTR_RW(pl);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	&hispeed_load.attr,
	&hispeed_freq.attr,
	&rtg_boost_freq.attr,
	&pl.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);
@@ -656,20 +996,7 @@ static void sugov_policy_free(struct sugov_policy *sg_policy)
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
-	struct sched_attr attr = {
-		.size		= sizeof(struct sched_attr),
-		.sched_policy	= SCHED_DEADLINE,
-		.sched_flags	= SCHED_FLAG_SUGOV,
-		.sched_nice	= 0,
-		.sched_priority	= 0,
-		/*
-		 * Fake (unused) bandwidth; workaround to "fix"
-		 * priority inheritance.
-		 */
-		.sched_runtime	=  1000000,
-		.sched_deadline = 10000000,
-		.sched_period	= 10000000,
-	};
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

@@ -687,10 +1014,10 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
		return PTR_ERR(thread);
	}

-	ret = sched_setattr_nocheck(thread, &attr);
+	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

@@ -728,6 +1055,31 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic
	return tunables;
}

static void sugov_tunables_save(struct cpufreq_policy *policy,
		struct sugov_tunables *tunables)
{
	int cpu;
	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!have_governor_per_policy())
		return;

	if (!cached) {
		cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!cached)
			return;

		for_each_cpu(cpu, policy->related_cpus)
			per_cpu(cached_tunables, cpu) = cached;
	}

	cached->pl = tunables->pl;
	cached->hispeed_load = tunables->hispeed_load;
	cached->rtg_boost_freq = tunables->rtg_boost_freq;
	cached->hispeed_freq = tunables->hispeed_freq;
	cached->rate_limit_us = tunables->rate_limit_us;
}
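
/*
 * The cached copy is per-CPU and outlives the governor instance, so
 * tunables survive a governor switch or full-cluster hotplug and are
 * replayed by sugov_tunables_restore() on the next sugov_init().
 */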

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
@@ -736,10 +1088,27 @@ static void sugov_tunables_free(struct sugov_tunables *tunables)
	kfree(tunables);
}

static void sugov_tunables_restore(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!cached)
		return;

	tunables->pl = cached->pl;
	tunables->hispeed_load = cached->hispeed_load;
	tunables->rtg_boost_freq = cached->rtg_boost_freq;
	tunables->hispeed_freq = cached->hispeed_freq;
	tunables->rate_limit_us = cached->rate_limit_us;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	unsigned long util;
	int ret = 0;

	/* State should be equivalent to EXIT */
@@ -779,10 +1148,32 @@ static int sugov_init(struct cpufreq_policy *policy)
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
	tunables->hispeed_freq = 0;

	switch (policy->cpu) {
	default:
	case 0:
		tunables->rtg_boost_freq = DEFAULT_CPU0_RTG_BOOST_FREQ;
		break;
	case 4:
		tunables->rtg_boost_freq = DEFAULT_CPU4_RTG_BOOST_FREQ;
		break;
	case 7:
		tunables->rtg_boost_freq = DEFAULT_CPU7_RTG_BOOST_FREQ;
		break;
	}

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	util = target_util(sg_policy, sg_policy->tunables->rtg_boost_freq);
	sg_policy->rtg_boost_util = util;

	/* consider per-CPU data stale after ~1.125 WALT windows */
	stale_ns = sched_ravg_window + (sched_ravg_window >> 3);

	sugov_tunables_restore(policy);

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
@@ -822,8 +1213,10 @@ static void sugov_exit(struct cpufreq_policy *policy)

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
-	if (!count)
+	if (!count) {
		sugov_tunables_save(policy, tunables);
		sugov_tunables_free(tunables);
	}

	mutex_unlock(&global_tunables_lock);

@@ -883,11 +1276,30 @@ static void sugov_stop(struct cpufreq_policy *policy)
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned long flags, now;
	unsigned int freq;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
				   ktime_get_ns());
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	} else {
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		freq = policy->cur;
		now = ktime_get_ns();

		/*
		 * cpufreq_driver_resolve_freq() has a clamp, so we do not need
		 * to do any sort of additional validation here.
		 */
		freq = cpufreq_driver_resolve_freq(policy, freq);
		sg_policy->cached_raw_freq = freq;
		sugov_fast_switch(sg_policy, now, freq);
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	}

	sg_policy->limits_changed = true;
kernel/sched/sched.h  +2 −2
@@ -2659,7 +2659,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
					cpu_of(rq)));
	if (data)
-		data->func(data, rq_clock(rq), flags);
+		data->func(data, sched_ktime_clock(), flags);
}
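
/*
 * Switching from rq_clock() to sched_ktime_clock() presumably keeps the
 * timestamps handed to the governor on the same clock WALT uses for its
 * window bookkeeping, so comparisons such as sugov_calc_avg_cap()'s
 * window rollover stay coherent.
 */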
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}