drivers/acpi/processor_perflib.c (+1 −1)

@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
 			acpi_processor_ppc_ost(pr->handle, 0);
 	}
 	if (ret >= 0)
-		cpufreq_update_policy(pr->id);
+		cpufreq_update_limits(pr->id);
 }
 
 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
drivers/cpufreq/amd_freq_sensitivity.c (+1 −1)

@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
 			PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
 
 	if (!pcidev) {
-		if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+		if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
 			return -ENODEV;
 	}
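Several hunks in this series are the same mechanical static_cpu_has() → boot_cpu_has() conversion seen above and in the intel_pstate and powernow-k8 files below: static_cpu_has() emits alternatives-patched code that only pays off on hot paths, while all of these checks run once in init or probe context, where a plain test of the boot CPU's capability bitmap is enough. A minimal sketch of the pattern (illustrative only, example_feature_probe() is not part of the patch):

#include <asm/cpufeature.h>

static int __init example_feature_probe(void)
{
	/*
	 * Slow path, runs once: boot_cpu_has() simply tests the boot CPU's
	 * feature bitmap, which is all an init-time check needs.
	 */
	if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
		return -ENODEV;

	/*
	 * static_cpu_has() is better reserved for code executed frequently
	 * after boot, where the boot-time patched branch avoids the test.
	 */
	return 0;
}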
drivers/cpufreq/cpufreq.c (+65 −19)

@@ -34,11 +34,6 @@
 static LIST_HEAD(cpufreq_policy_list);
 
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
-	return cpumask_empty(policy->cpus);
-}
-
 /* Macros to iterate over CPU policies */
 #define for_each_suitable_policy(__policy, __active)			 \
 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+	if (WARN_ON(!policy))
+		return;
+
+	lockdep_assert_held(&policy->rwsem);
+
+	up_write(&policy->rwsem);
+
+	cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+	if (!policy)
+		return NULL;
+
+	down_write(&policy->rwsem);
+
+	if (policy_is_inactive(policy)) {
+		cpufreq_cpu_release(policy);
+		return NULL;
+	}
+
+	return policy;
+}
+
 /*********************************************************************
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
  *********************************************************************/
@@ -669,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 	return ret;
 }
 
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-			      struct cpufreq_policy *new_policy);
-
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  */
@@ -2229,7 +2266,7 @@ EXPORT_SYMBOL(cpufreq_get_policy);
  *
  * The cpuinfo part of @policy is not updated by this function.
  */
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
+int cpufreq_set_policy(struct cpufreq_policy *policy,
 			      struct cpufreq_policy *new_policy)
 {
 	struct cpufreq_governor *old_gov;
@@ -2337,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 	struct cpufreq_policy new_policy;
 
 	if (!policy)
 		return;
 
-	down_write(&policy->rwsem);
-
-	if (policy_is_inactive(policy))
-		goto unlock;
-
 	/*
 	 * BIOS might change freq behind our back
 	 * -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
 	cpufreq_set_policy(policy, &new_policy);
 
 unlock:
-	up_write(&policy->rwsem);
-	cpufreq_cpu_put(policy);
+	cpufreq_cpu_release(policy);
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+	if (cpufreq_driver->update_limits)
+		cpufreq_driver->update_limits(cpu);
+	else
+		cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
 /*********************************************************************
  *                              BOOST                                *
  *********************************************************************/
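Taken together, cpufreq_cpu_acquire() and cpufreq_cpu_release() wrap the get-reference, lock-the-rwsem, check-active dance that cpufreq_update_policy() used to open-code. A hedged sketch of the resulting caller contract (example_adjust_policy() is a hypothetical caller, not part of the patch):

/* Hypothetical caller, shown only to illustrate the acquire/release contract. */
static void example_adjust_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;	/* No policy for this CPU, or the policy is inactive. */

	/*
	 * Here policy->rwsem is held for writing and the policy's usage
	 * counter is elevated, so the policy cannot go away under us.
	 */
	pr_debug("cpu%u: min %u kHz, max %u kHz\n", cpu, policy->min, policy->max);

	/* Drops the rwsem and the reference taken by cpufreq_cpu_acquire(). */
	cpufreq_cpu_release(policy);
}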
drivers/cpufreq/intel_pstate.c (+56 −9)

@@ -179,6 +179,7 @@ struct vid_data {
  *			based on the MSR_IA32_MISC_ENABLE value and whether or
  *			not the maximum reported turbo P-state is different from
  *			the maximum reported non-turbo one.
+ * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
  *			P-state capacity.
  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
 struct global_params {
 	bool no_turbo;
 	bool turbo_disabled;
+	bool turbo_disabled_mf;
 	int max_perf_pct;
 	int min_perf_pct;
 };
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
 	u64 epb;
 	int ret;
 
-	if (!static_cpu_has(X86_FEATURE_EPB))
+	if (!boot_cpu_has(X86_FEATURE_EPB))
 		return -ENXIO;
 
 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
 {
 	s16 epp;
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		/*
 		 * When hwp_req_data is 0, means that caller didn't read
 		 * MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
 	u64 epb;
 	int ret;
 
-	if (!static_cpu_has(X86_FEATURE_EPB))
+	if (!boot_cpu_has(X86_FEATURE_EPB))
 		return -ENXIO;
 
 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 	if (epp < 0)
 		return epp;
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		if (epp == HWP_EPP_PERFORMANCE)
 			return 1;
 		if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 			return 3;
 		else
 			return 4;
-	} else if (static_cpu_has(X86_FEATURE_EPB)) {
+	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
 		/*
 		 * Range:
 		 *	0x00-0x03	:	Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		u64 value;
 
 		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 			epp = cpu_data->epp_powersave;
 	}
 update_epp:
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		value &= ~GENMASK_ULL(31, 24);
 		value |= (u64)epp << 24;
 	} else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
 	value |= HWP_MIN_PERF(min_perf);
 
 	/* Set EPP/EPB to min */
-	if (static_cpu_has(X86_FEATURE_HWP_EPP))
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
 	else
 		intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
 		cpufreq_update_policy(cpu);
 }
 
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+	struct cpufreq_policy new_policy;
+	struct cpudata *cpudata;
+
+	if (!policy)
+		return;
+
+	cpudata = all_cpu_data[cpu];
+	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+	memcpy(&new_policy, policy, sizeof(*policy));
+	new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+	new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+	cpufreq_set_policy(policy, &new_policy);
+
+	cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+	mutex_lock(&intel_pstate_driver_lock);
+
+	update_turbo_state();
+	/*
+	 * If turbo has been turned on or off globally, policy limits for
+	 * all CPUs need to be updated to reflect that.
+	 */
+	if (global.turbo_disabled_mf != global.turbo_disabled) {
+		global.turbo_disabled_mf = global.turbo_disabled;
+		for_each_possible_cpu(cpu)
+			intel_pstate_update_max_freq(cpu);
+	} else {
+		cpufreq_update_policy(cpu);
+	}
+
+	mutex_unlock(&intel_pstate_driver_lock);
+}
+
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)					\
 	static ssize_t show_##file_name					\
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
 	/* First disable HWP notification interrupt as we don't process them */
-	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	update_turbo_state();
+	global.turbo_disabled_mf = global.turbo_disabled;
 	policy->cpuinfo.max_freq = global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
+	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_pstate",
 };
@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
 	.init		= intel_cpufreq_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_cpufreq_stop_cpu,
+	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_cpufreq",
 };
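This extract only shows the .c files. For cpufreq_update_limits() to reach intel_pstate_update_limits(), the series presumably also adds prototypes for the new helpers and an ->update_limits member to struct cpufreq_driver in include/linux/cpufreq.h, roughly along these lines (a sketch of the assumed header side, not visible in the diff above):

/* Assumed include/linux/cpufreq.h additions (not shown in this extract). */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
void cpufreq_update_limits(unsigned int cpu);

struct cpufreq_driver {
	/* ... existing fields and callbacks ... */

	/* Called back on firmware notifications to refresh policy limits. */
	void	(*update_limits)(unsigned int cpu);
};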
drivers/cpufreq/powernow-k8.c (+1 −1)

@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
 	unsigned int i, supported_cpus = 0;
 	int ret;
 
-	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+	if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
 		__request_acpi_cpufreq();
 		return -ENODEV;
 	}
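With the callback in place, the path from an ACPI _PPC notification becomes: acpi_processor_ppc_has_changed() calls cpufreq_update_limits(), which dispatches to the driver's ->update_limits if one is set and otherwise falls back to cpufreq_update_policy(). Any other driver that wants the lighter-weight handling only has to populate the new field; a hypothetical sketch (example names, not from the patch):

/* Hypothetical driver wiring for the new hook; names are illustrative. */
static void example_update_limits(unsigned int cpu)
{
	/* Re-read firmware-imposed limits for @cpu and reapply the policy. */
	cpufreq_update_policy(cpu);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	/* .init, .exit, .target_index, ... as usual ... */
	.update_limits	= example_update_limits,
};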