Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29cb3cd2 authored by Russell King
Browse files

ARM: pm: allow suspend finisher to return error codes



There are SoCs where attempting to enter a low power state is ignored,
and the CPU continues executing instructions with all state preserved.
It is over-complex at that point to disable the MMU just to call the
resume path.

Instead, allow the suspend finisher to return error codes to abort
suspend in this circumstance, where the cpu_suspend internals will then
unwind the saved state on the stack.  Also omit the tlb flush as no
changes to the page tables will have happened.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent cbe26349
Loading
Loading
Loading
Loading
+5 −4
Original line number Original line Diff line number Diff line
@@ -10,12 +10,13 @@ extern void cpu_resume(void);
 * Hide the first two arguments to __cpu_suspend - these are an implementation
 * Hide the first two arguments to __cpu_suspend - these are an implementation
 * detail which platform code shouldn't have to know about.
 * detail which platform code shouldn't have to know about.
 */
 */
static inline void cpu_suspend(unsigned long arg, void (*fn)(unsigned long))
static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
{
	extern void __cpu_suspend(int, long, unsigned long,
	extern int __cpu_suspend(int, long, unsigned long,
				  void (*)(unsigned long));
				 int (*)(unsigned long));
	__cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
	int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
	flush_tlb_all();
	flush_tlb_all();
	return ret;
}
}


#endif
#endif
+9 −2
Original line number Original line Diff line number Diff line
@@ -12,7 +12,6 @@
 *  r1 = v:p offset
 *  r1 = v:p offset
 *  r2 = suspend function arg0
 *  r2 = suspend function arg0
 *  r3 = suspend function
 *  r3 = suspend function
 * Note: does not return until system resumes
 */
 */
ENTRY(__cpu_suspend)
ENTRY(__cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}
	stmfd	sp!, {r4 - r11, lr}
@@ -26,7 +25,7 @@ ENTRY(__cpu_suspend)
#endif
#endif
	mov	r6, sp			@ current virtual SP
	mov	r6, sp			@ current virtual SP
	sub	sp, sp, r5		@ allocate CPU state on stack
	sub	sp, sp, r5		@ allocate CPU state on stack
	mov	r0, sp			@ save pointer
	mov	r0, sp			@ save pointer to CPU save block
	add	ip, ip, r1		@ convert resume fn to phys
	add	ip, ip, r1		@ convert resume fn to phys
	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
	ldr	r5, =sleep_save_sp
	ldr	r5, =sleep_save_sp
@@ -55,10 +54,17 @@ ENTRY(__cpu_suspend)
#else
#else
	bl	__cpuc_flush_kern_all
	bl	__cpuc_flush_kern_all
#endif
#endif
	adr	lr, BSYM(cpu_suspend_abort)
	ldmfd	sp!, {r0, pc}		@ call suspend fn
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(__cpu_suspend)
ENDPROC(__cpu_suspend)
	.ltorg
	.ltorg


cpu_suspend_abort:
	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
	mov	sp, r2
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)

/*
/*
 * r0 = control register value
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
 * r1 = v:p offset (preserved by cpu_do_resume)
@@ -89,6 +95,7 @@ cpu_resume_after_mmu:
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	bl	cpu_init		@ restore the und/abt/irq banked regs
	bl	cpu_init		@ restore the und/abt/irq banked regs
	mov	r0, #0			@ return zero on success
	ldmfd	sp!, {r4 - r11, pc}
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
ENDPROC(cpu_resume_after_mmu)


+1 −1
Original line number Original line Diff line number Diff line
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = {
	SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL),
	SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL),
};
};


void exynos4_cpu_suspend(unsigned long arg)
static int exynos4_cpu_suspend(unsigned long arg)
{
{
	unsigned long tmp;
	unsigned long tmp;
	unsigned long mask = 0xFFFFFFFF;
	unsigned long mask = 0xFFFFFFFF;
+2 −1
Original line number Original line Diff line number Diff line
@@ -321,9 +321,10 @@ static void omap34xx_save_context(u32 *save)
	*save++ = val;
	*save++ = val;
}
}


static void omap34xx_do_sram_idle(unsigned long save_state)
static int omap34xx_do_sram_idle(unsigned long save_state)
{
{
	omap34xx_cpu_suspend(save_state);
	omap34xx_cpu_suspend(save_state);
	return 0;
}
}


void omap_sram_idle(void)
void omap_sram_idle(void)
+2 −2
Original line number Original line Diff line number Diff line
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns {
extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;


/* sleep.S */
/* sleep.S */
extern void pxa25x_finish_suspend(unsigned long);
extern int pxa25x_finish_suspend(unsigned long);
extern void pxa27x_finish_suspend(unsigned long);
extern int pxa27x_finish_suspend(unsigned long);


extern int pxa_pm_enter(suspend_state_t state);
extern int pxa_pm_enter(suspend_state_t state);
extern int pxa_pm_prepare(void);
extern int pxa_pm_prepare(void);
Loading