Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a0652fc9 authored by Paul Mackerras
Browse files

powerpc: Unify the 32 and 64 bit idle loops



This unifies the 32-bit (ARCH=ppc and ARCH=powerpc) and 64-bit idle
loops.  It brings over the concept of having a ppc_md.power_save
function from 32-bit to ARCH=powerpc, which lets us get rid of
native_idle().  With this we will also be able to simplify the idle
handling for pSeries and cell.

Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 55aab8cd
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -12,12 +12,12 @@ endif

obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
				   irq.o align.o signal_32.o pmc.o vdso.o \
				   init_task.o process.o systbl.o
				   init_task.o process.o systbl.o idle.o
obj-y				+= vdso32/
obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
				   signal_64.o ptrace32.o \
				   paca.o cpu_setup_power4.o \
				   firmware.o sysfs.o idle_64.o
				   firmware.o sysfs.o
obj-$(CONFIG_PPC64)		+= vdso64/
obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
obj-$(CONFIG_POWER4)		+= idle_power4.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
obj-$(CONFIG_6xx)		+= idle_6xx.o

ifeq ($(CONFIG_PPC_MERGE),y)

@@ -51,7 +52,6 @@ obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
obj-$(CONFIG_6xx)		+= idle_6xx.o
obj-$(CONFIG_SMP)		+= smp.o
obj-$(CONFIG_KPROBES)		+= kprobes.o
obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
+6 −2
Original line number Diff line number Diff line
@@ -135,10 +135,10 @@ transfer_to_handler:
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
	bt-	8,4f			/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
	bt-	9,4f			/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx	
4:	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
+40 −39
Original line number Diff line number Diff line
@@ -2,13 +2,17 @@
 * Idle daemon for PowerPC.  Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Originally Written by Cort Dougan (cort@cs.nmt.edu)
 * Originally written by Cort Dougan (cort@cs.nmt.edu).
 * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
 * Paul Mackerras and others.
 *
 * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com>
 *
 * Additional shared processor, SMT, and firmware support
 *    Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
@@ -29,18 +33,43 @@
#include <asm/machdep.h>
#include <asm/smp.h>

extern void power4_idle(void);
#ifdef CONFIG_HOTPLUG_CPU
#define cpu_should_die()	(cpu_is_offline(smp_processor_id()) && \
				 system_state == SYSTEM_RUNNING)
#else
#define cpu_should_die()	0
#endif

void default_idle(void)
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();
	set_thread_flag(TIF_POLLING_NRFLAG);
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		if (!need_resched()) {
			while (!need_resched() && !cpu_is_offline(cpu)) {
		ppc64_runlatch_off();

		while (!need_resched() && !cpu_should_die()) {
			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
@@ -48,44 +77,16 @@ void default_idle(void)
				HMT_low();
				HMT_very_low();
			}

			HMT_medium();
		}

		HMT_medium();
		ppc64_runlatch_on();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
		if (cpu_should_die())
			cpu_die();
	}
}

void native_idle(void)
{
	while (1) {
		ppc64_runlatch_off();

		if (!need_resched())
			power4_idle();

		if (need_resched()) {
			ppc64_runlatch_on();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}

		if (cpu_is_offline(smp_processor_id()) &&
		    system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}

void cpu_idle(void)
{
	BUG_ON(NULL == ppc_md.idle_loop);
	ppc_md.idle_loop();
}

int powersave_nap;
+0 −15
Original line number Diff line number Diff line
@@ -87,19 +87,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	cmpwi	0,r3,0
	beqlr

	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Check current_thread_info()->flags */
	rlwinm	r4,r1,0,0,18
	lwz	r4,TI_FLAGS(r4)
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	mtmsr	r7	/* out of line this ? */
	blr
1:	
	/* Some pre-nap cleanups needed on some CPUs */
	andis.	r0,r3,HID0_NAP@h
	beq	2f
@@ -220,8 +207,6 @@ _GLOBAL(nap_save_msscr0)
_GLOBAL(nap_save_hid1)
	.space	4*NR_CPUS

_GLOBAL(powersave_nap)
	.long	0
_GLOBAL(powersave_lowspeed)
	.long	0

+0 −15
Original line number Diff line number Diff line
@@ -49,21 +49,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
	cmpwi	0,r4,0
	beqlr

	/* Clear MSR:EE */
	mfmsr	r7
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r0,r7,r4
	mtmsrd	r0

	/* Check current_thread_info()->flags */
	clrrdi	r4,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r4)
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	mtmsrd	r7	/* out of line this ? */
	blr
1:
	/* Go to NAP now */
BEGIN_FTR_SECTION
	DSSALL
Loading