Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d1a9417 authored by Thomas Gleixner
Browse files

x86: Use generic idle loop



Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130321215235.486594473@linutronix.de


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
parent aba92c9e
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -97,6 +97,7 @@ config X86
	select GENERIC_IOMAP
	select DCACHE_WORD_ACCESS
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_IDLE_LOOP
	select ARCH_WANT_IPC_PARSE_VERSION if X86_32
	select HAVE_ARCH_SECCOMP_FILTER
	select BUILDTIME_EXTABLE_SORT
+27 −78
Original line number Diff line number Diff line
@@ -301,13 +301,7 @@ void exit_idle(void)
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
void arch_cpu_idle_prepare(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;
}

	while (1) {
		tick_nohz_idle_enter();
/*
 * Hook invoked by the generic idle loop on entry to idle (this commit
 * wires x86 to GENERIC_IDLE_LOOP; see the "Called from the generic idle
 * code" note further down in this diff).
 */
void arch_cpu_idle_enter(void)
{
	local_touch_nmi();	/* was done inline in the old cpu_idle() loop */
	enter_idle();		/* x86 idle-state tracking; paired with __exit_idle() */
}

		while (!need_resched()) {
			rmb();
/*
 * Counterpart of arch_cpu_idle_enter(): called by the generic idle loop
 * when leaving idle. Only clears the x86 idle-tracking state.
 */
void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

			if (cpu_is_offline(smp_processor_id()))
/*
 * Called by the generic idle loop for a CPU that has gone offline.
 * NOTE(review): play_dead() presumably parks the CPU and never returns —
 * confirm against the generic idle code's contract for this hook.
 */
void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
 * Called from the generic idle code.
 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())
		x86_idle();

			rcu_idle_exit();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine..
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();

	if (!need_resched())
		safe_halt();	/* enables interrupts racelessly */
	else
		local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
		halt();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (x86_idle == poll_idle && smp_num_siblings > 1)
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle)
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		x86_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
+1 −1
Original line number Diff line number Diff line
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_idle();
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_store_boot_cpu_info(void)
+1 −1
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
	cpu_startup_entry(CPUHP_ONLINE);
}

static int xen_smp_intr_init(unsigned int cpu)