arch/x86/include/asm/mwait.h +43 −0

 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H

+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
@@ -13,4 +15,45 @@
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1

+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses the MONITOR/MWAIT instructions (introduced on P4 processors
+ * with PNI), which can obviate the IPI otherwise needed to trigger a
+ * need_resched check.  We execute MONITOR against need_resched and enter
+ * an optimized wait state through MWAIT.  Whenever someone changes
+ * need_resched, we are woken up from MWAIT (without an IPI).
+ *
+ * Starting with Core Duo processors, MWAIT can also take hints based on
+ * CPU capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+			mb();
+			clflush((void *)&current_thread_info()->flags);
+			mb();
+		}
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
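For orientation, here is a minimal sketch of how a caller might build the EAX hint from the macros above and enter idle through the new helper. The function name and parameters are illustrative, not part of this diff; only the MWAIT_* macros and mwait_idle_with_hints() come from <asm/mwait.h>:

	/*
	 * Hypothetical caller: EAX bits [7:4] select the target C-state,
	 * bits [3:0] the sub-state; ECX bit 0 (MWAIT_ECX_INTERRUPT_BREAK)
	 * requests wakeup on an interrupt even when interrupts are masked.
	 */
	static void example_enter_mwait_cstate(unsigned int cstate,
					       unsigned int sub_state)
	{
		unsigned long eax = (cstate << MWAIT_SUBSTATE_SIZE) |
				    (sub_state & MWAIT_SUBSTATE_MASK);

		mwait_idle_with_hints(eax, MWAIT_ECX_INTERRUPT_BREAK);
	}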
arch/x86/include/asm/processor.h +0 −23

@@ -700,29 +700,6 @@ static inline void sync_core(void)
 #endif
 }

-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
-{
-	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc8;"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax, %ecx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	trace_hardirqs_on();
-	/* "mwait %eax, %ecx;" */
-	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
arch/x86/kernel/acpi/cstate.c +0 −23

@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
arch/x86/kernel/smpboot.c +2 −0

@@ -1417,7 +1417,9 @@ static inline void mwait_play_dead(void)
 		 * The WBINVD is insufficient due to the spurious-wakeup
 		 * case where we return around the loop.
 		 */
+		mb();
 		clflush(mwait_ptr);
+		mb();
 		__monitor(mwait_ptr, 0, 0);
 		mb();
 		__mwait(eax, 0);
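The two added mb() calls fence the CLFLUSH itself; the pre-existing mb() between MONITOR and MWAIT stays. For context, a sketch of how this fragment sits inside the surrounding play-dead loop; the loop structure is paraphrased from mwait_play_dead(), not part of this two-line hunk:

	for (;;) {
		/*
		 * Flush the monitored cache line on every pass: a CLFLUSH
		 * erratum can otherwise leave MONITOR armed against stale
		 * data, and WBINVD alone does not cover the spurious-wakeup
		 * case where we come back around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
	}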
drivers/acpi/acpi_pad.c +1 −4

@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 		stop_critical_timings();

-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(power_saving_mwait_eax, 1);
+		mwait_idle_with_hints(power_saving_mwait_eax, 1);

 		start_critical_timings();
 		if (lapic_marked_unstable)
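Note that the converted call site also picks up the polling-flag handshake (current_set_polling_and_test() / __current_clr_polling()) that the open-coded sequence did not perform. The literal 1 passed as ECX is the same value as MWAIT_ECX_INTERRUPT_BREAK from <asm/mwait.h>, so acpi_pad keeps requesting interrupt-break semantics; spelling it with the named constant would be equivalent (a possible follow-up, not part of this diff):

		/* Equivalent call, using the named constant instead of 1. */
		mwait_idle_with_hints(power_saving_mwait_eax,
				      MWAIT_ECX_INTERRUPT_BREAK);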