Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 08a10002 authored by Rafael J. Wysocki
Browse files

Merge branch 'pm-cpufreq-sched'

* pm-cpufreq-sched:
  cpufreq: schedutil: Always process remote callback with slow switching
  cpufreq: schedutil: Don't restrict kthread to related_cpus unnecessarily
  cpufreq: Return 0 from ->fast_switch() on errors
  cpufreq: Simplify cpufreq_can_do_remote_dvfs()
  cpufreq: Process remote callbacks from any CPU if the platform permits
  sched: cpufreq: Allow remote cpufreq callbacks
  cpufreq: schedutil: Use unsigned int for iowait boost
  cpufreq: schedutil: Make iowait boost more energy efficient
parents bd87c8fb c49cbc19
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -274,6 +274,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
		transition_latency = CPUFREQ_ETERNAL;
		transition_latency = CPUFREQ_ETERNAL;


	policy->cpuinfo.transition_latency = transition_latency;
	policy->cpuinfo.transition_latency = transition_latency;
	policy->dvfs_possible_from_any_cpu = true;


	return 0;
	return 0;


+4 −3
Original line number Original line Diff line number Diff line
@@ -1843,9 +1843,10 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 * twice in parallel for the same policy and that it will never be called in
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 *
 * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
 * Returns the actual frequency set for the CPU.
 * callback to indicate an error condition, the hardware configuration must be
 *
 * preserved.
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
					unsigned int target_freq)
+3 −0
Original line number Original line Diff line number Diff line
@@ -272,6 +272,9 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;
	u64 delta_ns, lst;


	if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
		return;

	/*
	/*
	 * The work may not be allowed to be queued up right now.
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * Possible reasons:
+8 −0
Original line number Original line Diff line number Diff line
@@ -1746,6 +1746,10 @@ static void intel_pstate_update_util_pid(struct update_util_data *data,
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;
	u64 delta_ns = time - cpu->sample.time;


	/* Don't allow remote callbacks */
	if (smp_processor_id() != cpu->cpu)
		return;

	if ((s64)delta_ns < pid_params.sample_rate_ns)
	if ((s64)delta_ns < pid_params.sample_rate_ns)
		return;
		return;


@@ -1763,6 +1767,10 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;
	u64 delta_ns;


	/* Don't allow remote callbacks */
	if (smp_processor_id() != cpu->cpu)
		return;

	if (flags & SCHED_CPUFREQ_IOWAIT) {
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		cpu->iowait_boost = int_tofp(1);
		cpu->iowait_boost = int_tofp(1);
	} else if (cpu->iowait_boost) {
	} else if (cpu->iowait_boost) {
+20 −0
Original line number Original line Diff line number Diff line
@@ -127,6 +127,15 @@ struct cpufreq_policy {
	 */
	 */
	unsigned int		transition_delay_us;
	unsigned int		transition_delay_us;


	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	 /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	 /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int cached_target_freq;
	unsigned int cached_target_freq;
	int cached_resolved_idx;
	int cached_resolved_idx;
@@ -562,6 +571,17 @@ struct governor_attr {
			 size_t count);
			 size_t count);
};
};


static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
{
	/*
	 * Allow remote callbacks if:
	 * - dvfs_possible_from_any_cpu flag is set
	 * - the local and remote CPUs share cpufreq policy
	 */
	return policy->dvfs_possible_from_any_cpu ||
		cpumask_test_cpu(smp_processor_id(), policy->cpus);
}

/*********************************************************************
/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/
 *********************************************************************/
Loading