Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9fa64d64 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki
Browse files

Merge back intel_pstate fixes for v4.6.

* pm-cpufreq:
  intel_pstate: Avoid extra invocation of intel_pstate_sample()
  intel_pstate: Do not set utilization update hook too early
parents f55532a0 febce40f
Loading
Loading
Loading
Loading
+32 −12
Original line number Diff line number Diff line
@@ -910,7 +910,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	return true;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
@@ -984,8 +991,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
	 * enough period of time to adjust our busyness.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
	    && cpu->last_sample_time > 0) {
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
				      int_tofp(duration_ns));
		core_busy = mul_fp(core_busy, sample_ratio);
@@ -1100,10 +1106,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu, 0);

	cpu->update_util.func = intel_pstate_update_util;
	cpufreq_set_update_util_data(cpunum, &cpu->update_util);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

@@ -1122,18 +1126,33 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
	return get_avg_frequency(cpu);
}

/*
 * Register the scheduler utilization-update callback for @cpu_num.
 *
 * sample.time is zeroed before the hook is published so that the first
 * callback invocation after registration discards the stale previous
 * sample data instead of computing a delta against values left over
 * from before the hook was (re)installed — see the skip-first-sample
 * comment in intel_pstate_sample().
 */
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
}

/*
 * Unregister the utilization-update callback for @cpu.
 *
 * After clearing the hook pointer, synchronize_sched() waits out an
 * RCU-sched grace period so that any callback invocation already in
 * flight on another CPU has completed before this function returns;
 * the caller may then safely reconfigure or free the per-CPU data
 * the callback uses.
 */
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	cpufreq_set_update_util_data(cpu, NULL);
	synchronize_sched();
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	intel_pstate_clear_update_util_hook(policy->cpu);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set(policy->cpus);
		return 0;
		goto out;
	}

	pr_debug("intel_pstate: set powersave\n");
@@ -1163,6 +1182,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

 out:
	intel_pstate_set_update_util_hook(policy->cpu);

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

@@ -1187,8 +1209,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	cpufreq_set_update_util_data(cpu_num, NULL);
	synchronize_sched();
	intel_pstate_clear_update_util_hook(cpu_num);

	if (hwp_active)
		return;
@@ -1455,8 +1476,7 @@ static int __init intel_pstate_init(void)
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			cpufreq_set_update_util_data(cpu, NULL);
			synchronize_sched();
			intel_pstate_clear_update_util_hook(cpu);
			kfree(all_cpu_data[cpu]);
		}
	}