Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ea3d5226 authored by Linus Torvalds
Browse files

Revert "[PATCH] i386: add idle notifier"



This reverts commit 2ff2d3d7.

Uwe Bugla reports that he cannot mount a floppy drive any more, and Jiri
Slaby bisected it down to this commit.

Benjamin LaHaise also points out that this is a big hot-path, and that
interrupt delivery while idle is very common and should not go through
all these expensive gyrations.

Fix up conflicts in arch/i386/kernel/apic.c and arch/i386/kernel/irq.c
due to other unrelated irq changes.

Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Uwe Bugla <uwe.bugla@gmx.de>
Cc: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9654640d
Loading
Loading
Loading
Loading
+0 −4
Original line number Diff line number Diff line
@@ -38,7 +38,6 @@
#include <asm/hpet.h>
#include <asm/i8253.h>
#include <asm/nmi.h>
#include <asm/idle.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
@@ -561,7 +560,6 @@ void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();
@@ -1221,7 +1219,6 @@ void smp_spurious_interrupt(struct pt_regs *regs)
{
	unsigned long v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
@@ -1245,7 +1242,6 @@ void smp_error_interrupt(struct pt_regs *regs)
{
	unsigned long v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
+0 −2
Original line number Diff line number Diff line
@@ -12,7 +12,6 @@
#include <asm/system.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/idle.h>

#include <asm/therm_throt.h>

@@ -60,7 +59,6 @@ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_therm

fastcall void smp_thermal_interrupt(struct pt_regs *regs)
{
	exit_idle();
	irq_enter();
	vendor_thermal_interrupt(regs);
	irq_exit();
+0 −3
Original line number Diff line number Diff line
@@ -18,8 +18,6 @@
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/idle.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

@@ -77,7 +75,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
	union irq_ctx *curctx, *irqctx;
	u32 *isp;
#endif
	exit_idle();

	if (unlikely((unsigned)irq >= NR_IRQS)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+1 −52
Original line number Diff line number Diff line
@@ -49,7 +49,6 @@
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#include <asm/idle.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
@@ -82,42 +81,6 @@ void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

/* Add n to the idle notifier chain; it will be invoked with IDLE_START /
 * IDLE_END events from enter_idle()/__exit_idle(). */
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}

/* Remove n from the idle notifier chain; it will no longer receive
 * IDLE_START/IDLE_END notifications. */
void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}

static DEFINE_PER_CPU(volatile unsigned long, idle_state);

/* Mark this CPU as idle (bit 0 of the per-cpu idle_state word) and fire the
 * IDLE_START notifier chain.  Called from the idle loop before idle(). */
void enter_idle(void)
{
	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
	__set_bit(0, &__get_cpu_var(idle_state));
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

/* Clear this CPU's idle flag and fire IDLE_END.  The test-and-clear makes
 * the notification fire at most once per enter_idle(): if the bit was
 * already clear (idle already exited, e.g. by a nested caller) we return
 * without notifying. */
static void __exit_idle(void)
{
	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
	if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Interrupt-entry hook: leave the idle state if we interrupted the idle
 * task.  Any task with a non-zero pid is not the idle task (pid 0), so
 * there is nothing to do for it. */
void exit_idle(void)
{
	if (current->pid)
		return;
	__exit_idle();
}

void disable_hlt(void)
{
	hlt_counter++;
@@ -168,7 +131,6 @@ EXPORT_SYMBOL(default_idle);
 */
/* Polling idle loop body: keep interrupts enabled and spin with
 * cpu_relax() (a low-power/SMT-friendly busy-wait hint) instead of
 * halting the CPU. */
static void poll_idle (void)
{
	local_irq_enable();
	cpu_relax();
}

@@ -229,16 +191,7 @@ void cpu_idle(void)
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			idle();
			__exit_idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
@@ -293,11 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(eax, ecx);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
			__mwait(eax, ecx);
	}
}

+0 −2
Original line number Diff line number Diff line
@@ -23,7 +23,6 @@

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <mach_apic.h>

/*
@@ -624,7 +623,6 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	irq_exit();
Loading