Loading include/linux/cpufreq.h +9 −0 Original line number Diff line number Diff line Loading @@ -549,6 +549,15 @@ static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); } static inline void cpufreq_policy_apply_limits_fast(struct cpufreq_policy *policy) { if (policy->max < policy->cur) cpufreq_driver_fast_switch(policy, policy->max); else if (policy->min > policy->cur) cpufreq_driver_fast_switch(policy, policy->min); } /* Governor attribute set */ struct gov_attr_set { struct kobject kobj; Loading kernel/sched/cpufreq_schedutil.c +6 −0 Original line number Diff line number Diff line Loading @@ -1075,6 +1075,12 @@ static void sugov_limits(struct cpufreq_policy *policy) raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); cpufreq_policy_apply_limits(policy); mutex_unlock(&sg_policy->work_lock); } else { raw_spin_lock_irqsave(&sg_policy->update_lock, flags); sugov_track_cycles(sg_policy, sg_policy->policy->cur, ktime_get_ns()); cpufreq_policy_apply_limits_fast(policy); raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); } sg_policy->need_freq_update = true; Loading Loading
/*
 * cpufreq_policy_apply_limits_fast - clamp the current frequency to the
 * policy limits via the driver's fast-switch path.
 *
 * Fast-switch counterpart of cpufreq_policy_apply_limits(): when the
 * current frequency lies outside [policy->min, policy->max], request the
 * nearest limit through cpufreq_driver_fast_switch() rather than the
 * regular __cpufreq_driver_target() path. If the current frequency is
 * already within the limits, no request is issued at all.
 *
 * NOTE(review): the value returned by cpufreq_driver_fast_switch() is
 * discarded here, and policy->cur is not touched by this helper — confirm
 * callers do not expect policy->cur to be refreshed as a side effect.
 */
static inline void cpufreq_policy_apply_limits_fast(struct cpufreq_policy *policy)
{
	if (policy->cur > policy->max)
		cpufreq_driver_fast_switch(policy, policy->max);
	else if (policy->cur < policy->min)
		cpufreq_driver_fast_switch(policy, policy->min);
}
kernel/sched/cpufreq_schedutil.c +6 −0 Original line number Diff line number Diff line Loading @@ -1075,6 +1075,12 @@ static void sugov_limits(struct cpufreq_policy *policy) raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); cpufreq_policy_apply_limits(policy); mutex_unlock(&sg_policy->work_lock); } else { raw_spin_lock_irqsave(&sg_policy->update_lock, flags); sugov_track_cycles(sg_policy, sg_policy->policy->cur, ktime_get_ns()); cpufreq_policy_apply_limits_fast(policy); raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); } sg_policy->need_freq_update = true; Loading