Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dc775dd8 authored by Thomas Gleixner
Browse files

sh: Use generic idle loop



Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Link: http://lkml.kernel.org/r/20130321215235.216323644@linutronix.de


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 99444202
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -33,6 +33,8 @@ config SUPERH
	select GENERIC_ATOMIC64
	select GENERIC_IRQ_SHOW
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_IDLE_LOOP
	select GENERIC_IDLE_POLL_SETUP
	select GENERIC_CLOCKEVENTS
	select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
	select GENERIC_STRNCPY_FROM_USER
+11 −90
Original line number Diff line number Diff line
@@ -24,98 +24,24 @@

static void (*sh_idle)(void);

static int hlt_counter;

/*
 * Handle the "nohlt" kernel command-line parameter: disable the
 * sleeping idle path (see hlt_works()) so idle busy-polls instead.
 */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;	/* parameter consumed */
}
__setup("nohlt", nohlt_setup);

/*
 * Handle the "hlt" kernel command-line parameter: re-enable the
 * sleeping idle path (the inverse of "nohlt").
 */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;	/* parameter consumed */
}
__setup("hlt", hlt_setup);

/* Non-zero unless "nohlt" was given on the command line. */
static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
/* Busy-wait idle: spin with IRQs enabled until a reschedule is due. */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

/*
 * Default idle routine: if hlt is permitted, put the CPU to sleep with
 * cpu_sleep(); otherwise fall back to busy-polling via poll_idle().
 *
 * NOTE(review): this span is a rendered diff hunk with the -/+ markers
 * stripped — the inconsistent indentation interleaves removed and kept
 * lines. Do not treat it as the exact final source; compare against the
 * upstream file before modifying.
 */
void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

	set_bl_bit();	/* presumably masks interrupts via the SH BL bit — verify */
		if (!need_resched()) {
	local_irq_enable();
	/* Isn't this racy ? */
	cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
	clear_bl_bit();
	} else
		poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void cpu_idle(void)
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
	play_dead();
}

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())
		sh_idle();
			/*
			 * Sanity check to ensure that sh_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (sh_idle)
		return;

	if (hlt_works())
	if (!sh_idle)
		sh_idle = default_idle;
	else
		sh_idle = poll_idle;
}

void stop_this_cpu(void *unused)
+1 −1
Original line number Diff line number Diff line
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
	cpu_startup_entry(CPUHP_ONLINE);
}

extern struct {