
Commit 33747ced authored by Wei Wang, committed by DarkJoker360

ANDROID: cpufreq: schedutil: maintain raw cache when next_f is not changed



Currently, the raw cache is reset for correctness whenever next_f is changed after get_next_freq. However, dropping the cache forces the frequency to be recomputed on the next update, which costs extra cycles in those cases. This patch preserves the previously cached value instead of dropping it.

Bug: 159936782
Bug: 158863204
Signed-off-by: Wei Wang <wvw@google.com>
[dereference23: Backport to 4.14]
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
Change-Id: I519ca02dd2e6038e3966e1f68fee641628827c82
parent 87c3e8e7
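
Before the diff, a minimal user-space model of the save/restore pattern the patch introduces may help. This is not kernel code: the struct and field names mirror the diff below, but resolve_freq(), the rate_limited flag, and main() are simplified stand-ins invented purely for illustration (in the real code the resolution is done by cpufreq_driver_resolve_freq() and the rejection by sugov_up_down_rate_limit()).

/*
 * Simplified model of the schedutil raw-frequency cache.
 * Frequencies are in kHz, as in cpufreq.
 */
#include <stdio.h>
#include <limits.h>

struct sugov_policy {
	unsigned int next_freq;
	unsigned int cached_raw_freq;
	unsigned int prev_cached_raw_freq;	/* field added by this patch */
};

/* Stand-in for cpufreq_driver_resolve_freq(): round up to a 300000 kHz step. */
static unsigned int resolve_freq(unsigned int raw)
{
	return ((raw + 299999) / 300000) * 300000;
}

/*
 * Mirrors get_next_freq(): on a cache hit reuse the already-resolved
 * next_freq; on a miss remember the old raw value before overwriting it.
 */
static unsigned int get_next_freq(struct sugov_policy *sg, unsigned int raw)
{
	if (raw == sg->cached_raw_freq && sg->next_freq != UINT_MAX)
		return sg->next_freq;
	sg->prev_cached_raw_freq = sg->cached_raw_freq;
	sg->cached_raw_freq = raw;
	return resolve_freq(raw);
}

/*
 * Mirrors sugov_update_commit(): when the new frequency is rejected,
 * restore the previous raw cache instead of zeroing it, so an unchanged
 * request on the next update still hits the cache.
 */
static void update_commit(struct sugov_policy *sg, unsigned int next_f,
			  int rate_limited)
{
	if (rate_limited) {
		sg->cached_raw_freq = sg->prev_cached_raw_freq;
		return;
	}
	sg->next_freq = next_f;
}

int main(void)
{
	struct sugov_policy sg = { .next_freq = UINT_MAX };

	unsigned int f1 = get_next_freq(&sg, 1000000);
	update_commit(&sg, f1, 0);	/* accepted */

	unsigned int f2 = get_next_freq(&sg, 1400000);
	update_commit(&sg, f2, 1);	/* rejected: prev cache restored */

	/* Same raw request as the accepted one: served from the cache. */
	unsigned int f3 = get_next_freq(&sg, 1000000);

	printf("f1=%u f2=%u f3=%u (f3 from cache: %s)\n",
	       f1, f2, f3, f3 == sg.next_freq ? "yes" : "no");
	return 0;
}

With the old behaviour (cached_raw_freq = 0 on a rejected or bypassed update), the third request would fall through to resolve_freq() again; with prev_cached_raw_freq restored, it is answered from the cache.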
kernel/sched/cpufreq_schedutil.c (+10 −10)
@@ -42,6 +42,7 @@ struct sugov_policy {
 	s64 freq_update_delay_ns;
 	unsigned int next_freq;
 	unsigned int cached_raw_freq;
+	unsigned int prev_cached_raw_freq;
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
@@ -163,8 +164,8 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 		return;
 
 	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
-		/* Don't cache a raw freq that didn't become next_freq */
-		sg_policy->cached_raw_freq = 0;
+		/* Restore cached freq as next_freq is not changed */
+		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		return;
 	}
 
@@ -221,6 +222,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
 	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
 		return sg_policy->next_freq;
+	sg_policy->prev_cached_raw_freq = sg_policy->cached_raw_freq;
 	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
@@ -334,8 +336,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
 
 	if (flags & SCHED_CPUFREQ_RT_DL) {
-		/* clear cache when it's bypassed */
-		sg_policy->cached_raw_freq = 0;
+		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -350,8 +351,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		    sg_policy->next_freq != UINT_MAX) {
 			next_f = sg_policy->next_freq;
 
-			/* Reset cached freq as next_freq has changed */
-			sg_policy->cached_raw_freq = 0;
+			/* Restore cached freq as next_freq has changed */
+			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		}
 	}
 	sugov_update_commit(sg_policy, time, next_f);
@@ -383,8 +384,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			continue;
 		}
 		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) {
-			/* clear cache when it's bypassed */
-			sg_policy->cached_raw_freq = 0;
+			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 			return policy->cpuinfo.max_freq;
 		}
 
@@ -429,8 +429,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 		!(flags & SCHED_CPUFREQ_CONTINUE)) {
 		if (flags & SCHED_CPUFREQ_RT_DL) {
 			next_f = sg_policy->policy->cpuinfo.max_freq;
-			/* clear cache when it's bypassed */
-			sg_policy->cached_raw_freq = 0;
+			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		} else {
 			next_f = sugov_next_freq_shared(sg_cpu, time);
 		}
@@ -838,6 +837,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = 0;
+	sg_policy->prev_cached_raw_freq = 0;
 
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);