Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dba8cf26 authored by Saravana Kannan's avatar Saravana Kannan Committed by Joonwoo Park
Browse files

cpufreq: interactive: Compute target freq independent of policy min/max



When the existing code computes the target frequency, it limits the target
frequency to be within policy min/max. It does this to make sure the
governor doesn't set the CPU frequency to something outside the policy
min/max limits.

The problem with this is that when the limits are removed, the CPU
frequency takes time to catch up with the real load because the governor
needs to wait for the next recalculation and even when the recalculated
frequency is correct, hysteresis might be applied.

In reality, the load might have already been consistent enough to exceed
the hysteresis criteria and cause a frequency change if it weren't for the
policy limits. However, since the policy min/max limits the target
frequency from reflecting the increased need, the hysteresis criteria
doesn't get a chance to expire.

Since the CPUfreq framework already takes care of limiting the governor's
request to be within the policy min/max limits before it sets the CPU
frequency, there's no need to limit the computation of target frequency to
be within policy min/max.

That way, when limits are removed, we can use the current target frequency
as is and immediately jump to a CPU frequency that's appropriate for the
current load.

Change-Id: Idc02359f6ff91530ff69de8edd8a25c275642099
Signed-off-by: default avatarSaravana Kannan <skannan@codeaurora.org>
parent 238864b8
Loading
Loading
Loading
Loading
+13 −18
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@ struct cpufreq_interactive_policyinfo {
	spinlock_t load_lock; /* protects load tracking stat */
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_policy p_nolim; /* policy copy with no limits */
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /*protects target freq */
	unsigned int target_freq;
@@ -339,7 +340,7 @@ static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    &pcpu->p_nolim, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;
@@ -354,7 +355,7 @@ static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    &pcpu->p_nolim, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
@@ -381,7 +382,7 @@ static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    &pcpu->p_nolim, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
@@ -520,7 +521,7 @@ static void __cpufreq_interactive_timer(unsigned long data, bool is_notif)
	}

	if (policy_max_fast_restore || jump_to_max) {
		new_freq = ppol->policy->max;
		new_freq = ppol->policy->cpuinfo.max_freq;
	} else if (skip_hispeed_logic) {
		new_freq = choose_freq(ppol, loadadjfreq);
	} else if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
@@ -557,7 +558,7 @@ static void __cpufreq_interactive_timer(unsigned long data, bool is_notif)

	ppol->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(ppol->policy, ppol->freq_table,
	if (cpufreq_frequency_table_target(&ppol->p_nolim, ppol->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
@@ -598,7 +599,7 @@ static void __cpufreq_interactive_timer(unsigned long data, bool is_notif)
		ppol->floor_validate_time = now;
	}

	if (new_freq == ppol->policy->max && !policy_max_fast_restore)
	if (new_freq >= ppol->policy->max && !policy_max_fast_restore)
		ppol->max_freq_hyst_start_time = now;

	if (ppol->target_freq == new_freq &&
@@ -1579,7 +1580,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
	struct cpufreq_interactive_policyinfo *ppol;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
@@ -1684,6 +1684,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		ppol->policy = policy;
		ppol->target_freq = policy->cur;
		ppol->freq_table = freq_table;
		ppol->p_nolim = *policy;
		ppol->p_nolim.min = policy->cpuinfo.min_freq;
		ppol->p_nolim.max = policy->cpuinfo.max_freq;
		ppol->floor_freq = ppol->target_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		ppol->hispeed_validate_time = ppol->floor_validate_time;
@@ -1719,26 +1722,18 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		break;

	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy,
				policy->cur, CPUFREQ_RELATION_L);

		ppol = per_cpu(polinfo, policy->cpu);

		__cpufreq_driver_target(policy,
				ppol->target_freq, CPUFREQ_RELATION_L);

		down_read(&ppol->enable_sem);
		if (ppol->governor_enabled) {
			spin_lock_irqsave(&ppol->target_freq_lock, flags);
			if (policy->max < ppol->target_freq)
				ppol->target_freq = policy->max;
			else if (policy->min > ppol->target_freq)
				ppol->target_freq = policy->min;
			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);

			if (policy->min < ppol->min_freq)
				cpufreq_interactive_timer_resched(policy->cpu,
								  true);
			ppol->min_freq = policy->min;
		}

		up_read(&ppol->enable_sem);

		break;