diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -541,13 +541,17 @@ static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
 		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
 }
 
-static inline void cpufreq_policy_apply_limits_fast(struct cpufreq_policy *policy)
+static inline unsigned int cpufreq_policy_apply_limits_fast(struct cpufreq_policy *policy)
 {
+	unsigned int ret = 0;
+
 	if (policy->max < policy->cur)
-		cpufreq_driver_fast_switch(policy, policy->max);
+		ret = cpufreq_driver_fast_switch(policy, policy->max);
 	else if (policy->min > policy->cur)
-		cpufreq_driver_fast_switch(policy, policy->min);
+		ret = cpufreq_driver_fast_switch(policy, policy->min);
+
+	return ret;
 }
 
 /* Governor attribute set */
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -1067,6 +1067,8 @@ static void sugov_limits(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
 	unsigned long flags;
+	unsigned int ret;
+	int cpu;
 
 	if (!policy->fast_switch_enabled) {
 		mutex_lock(&sg_policy->work_lock);
@@ -1080,7 +1082,12 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
 				   ktime_get_ns());
-		cpufreq_policy_apply_limits_fast(policy);
+		ret = cpufreq_policy_apply_limits_fast(policy);
+		if (ret && policy->cur != ret) {
+			policy->cur = ret;
+			for_each_cpu(cpu, policy->cpus)
+				trace_cpu_frequency(ret, cpu);
+		}
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}
include/linux/cpufreq.h (+8 −4)
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -541,13 +541,17 @@ static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
 		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
 }
 
-static inline void cpufreq_policy_apply_limits_fast(struct cpufreq_policy *policy)
+static inline unsigned int cpufreq_policy_apply_limits_fast(struct cpufreq_policy *policy)
 {
+	unsigned int ret = 0;
+
 	if (policy->max < policy->cur)
-		cpufreq_driver_fast_switch(policy, policy->max);
+		ret = cpufreq_driver_fast_switch(policy, policy->max);
 	else if (policy->min > policy->cur)
-		cpufreq_driver_fast_switch(policy, policy->min);
+		ret = cpufreq_driver_fast_switch(policy, policy->min);
+
+	return ret;
 }
 
 /* Governor attribute set */
kernel/sched/cpufreq_schedutil.c (+8 −1)
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -1067,6 +1067,8 @@ static void sugov_limits(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
 	unsigned long flags;
+	unsigned int ret;
+	int cpu;
 
 	if (!policy->fast_switch_enabled) {
 		mutex_lock(&sg_policy->work_lock);
@@ -1080,7 +1082,12 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
 				   ktime_get_ns());
-		cpufreq_policy_apply_limits_fast(policy);
+		ret = cpufreq_policy_apply_limits_fast(policy);
+		if (ret && policy->cur != ret) {
+			policy->cur = ret;
+			for_each_cpu(cpu, policy->cpus)
+				trace_cpu_frequency(ret, cpu);
+		}
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}