Commit 2ff2d3d7 authored by Stephane Eranian, committed by Andi Kleen

[PATCH] i386: add idle notifier



Add a notifier mechanism to the low-level idle loop.  You can register a
callback function which gets invoked on entry to and exit from the low-level
idle loop.  The low-level idle loop is defined as the polling loop, the
low-power call, or the mwait instruction.  Interrupts processed by the idle
thread are not considered part of the low-level loop.

The notifier can be used to measure precisely how much time is spent in
useless execution (or in low-power mode).  The perfmon subsystem uses it to
turn monitoring on and off.
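
For illustration, here is a minimal sketch (not part of the patch) of how a
client such as perfmon might use the new interface: it registers a
notifier_block via idle_notifier_register() and distinguishes the IDLE_START
and IDLE_END events in its callback to account per-CPU idle time.  The module
name and the per-CPU bookkeeping below are hypothetical; only
idle_notifier_register()/idle_notifier_unregister() and the
IDLE_START/IDLE_END constants come from the patch.

/*
 * Hypothetical consumer of the i386 idle notifier (2.6-era kernel API
 * assumed).  Accumulates, per CPU, the number of jiffies spent in the
 * low-level idle loop.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <asm/idle.h>

static DEFINE_PER_CPU(unsigned long, idle_enter);	/* jiffies at IDLE_START */
static DEFINE_PER_CPU(unsigned long, idle_total);	/* accumulated idle jiffies */

/* Runs from the idle loop with interrupts off; keep it short and lock-free. */
static int idle_event(struct notifier_block *nb, unsigned long action, void *unused)
{
	if (action == IDLE_START)
		__get_cpu_var(idle_enter) = jiffies;
	else	/* IDLE_END */
		__get_cpu_var(idle_total) += jiffies - __get_cpu_var(idle_enter);
	return NOTIFY_DONE;
}

static struct notifier_block idle_nb = {
	.notifier_call = idle_event,
};

static int __init idle_watch_init(void)
{
	idle_notifier_register(&idle_nb);
	return 0;
}

static void __exit idle_watch_exit(void)
{
	idle_notifier_unregister(&idle_nb);
}

module_init(idle_watch_init);
module_exit(idle_watch_exit);
MODULE_LICENSE("GPL");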

Signed-off-by: Stephane Eranian <eranian@hpl.hp.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andi Kleen <ak@suse.de>
parent 86a97883
+4 −0
@@ -36,6 +36,7 @@
 #include <asm/hpet.h>
 #include <asm/i8253.h>
 #include <asm/nmi.h>
+#include <asm/idle.h>
 
 #include <mach_apic.h>
 #include <mach_apicdef.h>
@@ -1255,6 +1256,7 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
+	exit_idle();
 	irq_enter();
 	smp_local_timer_interrupt();
 	irq_exit();
@@ -1305,6 +1307,7 @@ fastcall void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	unsigned long v;
 
+	exit_idle();
 	irq_enter();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
@@ -1329,6 +1332,7 @@ fastcall void smp_error_interrupt(struct pt_regs *regs)
 {
 	unsigned long v, v1;
 
+	exit_idle();
 	irq_enter();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v = apic_read(APIC_ESR);
+2 −0
@@ -12,6 +12,7 @@
 #include <asm/system.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
 
 #include <asm/therm_throt.h>
 
@@ -59,6 +60,7 @@ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_therm

 fastcall void smp_thermal_interrupt(struct pt_regs *regs)
 {
+	exit_idle();
 	irq_enter();
 	vendor_thermal_interrupt(regs);
 	irq_exit();
+3 −0
@@ -19,6 +19,8 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 
+#include <asm/idle.h>
+
 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
@@ -61,6 +63,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 	union irq_ctx *curctx, *irqctx;
 	u32 *isp;
 #endif
+	exit_idle();
 
 	if (unlikely((unsigned)irq >= NR_IRQS)) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+52 −1
@@ -48,6 +48,7 @@
 #include <asm/i387.h>
 #include <asm/desc.h>
 #include <asm/vm86.h>
+#include <asm/idle.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -80,6 +81,42 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+
+static DEFINE_PER_CPU(volatile unsigned long, idle_state);
+
+void enter_idle(void)
+{
+	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
+	__set_bit(0, &__get_cpu_var(idle_state));
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
+	if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
+		return;
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+void exit_idle(void)
+{
+	if (current->pid)
+		return;
+	__exit_idle();
+}
+
 void disable_hlt(void)
 {
 	hlt_counter++;
@@ -130,6 +167,7 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
+	local_irq_enable();
 	cpu_relax();
 }
 
@@ -189,7 +227,16 @@ void cpu_idle(void)
 				play_dead();
 
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
+
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_irq_disable();
+			enter_idle();
 			idle();
+			__exit_idle();
 		}
 		preempt_enable_no_resched();
 		schedule();
@@ -243,7 +290,11 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__mwait(eax, ecx);
+			__sti_mwait(eax, ecx);
+		else
+			local_irq_enable();
+	} else {
+		local_irq_enable();
 	}
 }
 
+2 −0
@@ -23,6 +23,7 @@

 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 #include <mach_apic.h>
 
 /*
@@ -624,6 +625,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 	/*
 	 * At this point the info structure may be out of scope unless wait==1
 	 */
+	exit_idle();
 	irq_enter();
 	(*func)(info);
 	irq_exit();