Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87fa05ae authored by Sam Ravnborg, committed by Thomas Gleixner
Browse files

sparc: Use generic idle loop



Add generic cpu_idle support

sparc32:
- replace call to cpu_idle() with cpu_startup_entry()
- add arch_cpu_idle()

sparc64:
- smp_callin() now includes the cpu_startup_entry() call, so we can
  skip calling cpu_idle from assembler
- add arch_cpu_idle() and arch_cpu_idle_dead()

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Reviewed-by: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Cc: torvalds@linux-foundation.org
Cc: rusty@rustcorp.com.au
Cc: paulmck@linux.vnet.ibm.com
Cc: peterz@infradead.org
Cc: magnus.damm@gmail.com
Acked-by: David Miller <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20130411193850.GA2330@merkur.ravnborg.org


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 781b0e87
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@ config SPARC
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_CMOS_UPDATE
	select GENERIC_CLOCKEVENTS
	select GENERIC_IDLE_LOOP
	select GENERIC_STRNCPY_FROM_USER
	select GENERIC_STRNLEN_USER
	select MODULES_USE_ELF_RELA
+1 −2
Original line number Diff line number Diff line
@@ -128,8 +128,7 @@ hv_cpu_startup:

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0

	call		cpu_panic
	 nop

+5 −16
Original line number Diff line number Diff line
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];

/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
/* Idle loop support. */
void arch_cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	for (;;) {
		while (!need_resched()) {
	if (sparc_idle)
		(*sparc_idle)();
			else
				cpu_relax();
		}
		schedule_preempt_disabled();
	}
	local_irq_enable();
}

/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
+13 −36
Original line number Diff line number Diff line
@@ -52,20 +52,17 @@

#include "kstack.h"

static void sparc64_yield(int cpu)
/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		return;
	}

	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb__after_clear_bit();

	while (!need_resched() && !cpu_is_offline(cpu)) {
	} else {
		unsigned long pstate;

		/* Disable interrupts. */
                /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
                 * the cpu sleep hypervisor call.
                 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(cpu))
		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
			sun4v_cpu_yield();

		/* Re-enable interrupts. */
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}

	set_thread_flag(TIF_POLLING_NRFLAG);
	local_irq_enable();
}

/* The idle loop on sparc64. */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	while(1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_is_offline(cpu))
			sparc64_yield(cpu);

		rcu_idle_exit();
		tick_nohz_idle_exit();

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(cpu)) {
void arch_cpu_idle_dead()
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif
		schedule_preempt_disabled();
	}
}

#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
+1 −1
Original line number Diff line number Diff line
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
	local_irq_enable();

	wmb();
	cpu_idle();
	cpu_startup_entry(CPUHP_ONLINE);

	/* We should never reach here! */
	BUG();
Loading