Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb1c9034 authored by Jean Pihet, committed by Kevin Hilman
Browse files

OMAP3: ASM sleep code format rework



Cosmetic fixes to the code:
- white spaces and tabs,
- alignment,
- comments rephrase and typos,
- multi-line comments

Tested on N900 and Beagleboard with full RET and OFF modes,
using cpuidle and suspend.

Signed-off-by: Jean Pihet <j-pihet@ti.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
parent 83521291
Loading
Loading
Loading
Loading
+117 −107
Original line number Diff line number Diff line
/*
 * linux/arch/arm/mach-omap2/sleep.S
 *
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
@@ -118,10 +116,10 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
	str	r1, l2dis_3630
	ldmfd	sp!, {pc}	@ restore regs and return

	.text
/* Function to call rom code to save secure ram context */
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r1-r12, lr}	@ save registers on stack

	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
@@ -165,8 +163,8 @@ ENTRY(save_secure_ram_context_sz)
 *
 *
 * Notes:
 * - this code gets copied to internal SRAM at boot. The execution pointer
 *   in SRAM is _omap_sram_idle.
 * - this code gets copied to internal SRAM at boot and after wake-up
 *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
@@ -439,12 +437,13 @@ skipl2dis:
	.word	0xE1600071		@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

l2_inv_api_params:
	.word	0x1, 0x00
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov r12, #0x1                         @ set up to invalide L2
smi:    .word 0xE1600070		@ Call SMI monitor (smieq)
	mov r12, #0x1			@ set up to invalidate L2
	.word 0xE1600070		@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
@@ -458,15 +457,17 @@ smi: .word 0xE1600070 @ Call SMI monitor (smieq)
	.word	0xE1600070		@ Call SMI monitor (smieq)
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1	@ Do we need to re-enable L2 on 3630?
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:
	mov	r1, #0
	/* Invalidate all instruction caches to PoU
	 * and flush branch target cache */
	/*
	 * Invalidate all instruction caches to PoU
	 * and flush branch target cache
	 */
	mcr	p15, 0, r1, c7, c5, 0

	ldr	r4, scratchpad_base
@@ -487,13 +488,13 @@ skipl2reen:
	MCR p15, 0, r6, c2, c0, 1
	/* Translation table base control register */
	MCR p15, 0, r7, c2, c0, 2
	/*domain access Control Register */
	/* Domain access Control Register */
	MCR p15, 0, r8, c3, c0, 0
	/* data fault status Register */
	/* Data fault status Register */
	MCR p15, 0, r9, c5, c0, 0

	ldmia	r3!,{r4-r8}
	/* instruction fault status Register */
	/* Instruction fault status Register */
	MCR p15, 0, r4, c5, c0, 1
	/* Data Auxiliary Fault Status Register */
	MCR p15, 0, r5, c5, c1, 0
@@ -505,13 +506,13 @@ skipl2reen:
	MCR p15, 0, r8, c6, c0, 2
	ldmia	r3!,{r4-r7}

	/* user r/w thread and process ID */
	/* User r/w thread and process ID */
	MCR p15, 0, r4, c13, c0, 2
	/* user ro thread and process ID */
	/* User ro thread and process ID */
	MCR p15, 0, r5, c13, c0, 3
	/* Privileged only thread and process ID */
	MCR p15, 0, r6, c13, c0, 4
	/* cache size selection */
	/* Cache size selection */
	MCR p15, 2, r7, c0, c0, 0
	ldmia	r3!,{r4-r8}
	/* Data TLB lockdown registers */
@@ -526,23 +527,24 @@ skipl2reen:
	MCR p15, 0, r8, c13, c0, 1

	ldmia	r3!,{r4-r5}
	/* primary memory remap register */
	/* Primary memory remap register */
	MCR p15, 0, r4, c10, c2, 0
	/*normal memory remap register */
	/* Normal memory remap register */
	MCR p15, 0, r5, c10, c2, 1

	/* Restore cpsr */
	ldmia	r3!,{r4}	/*load CPSR from SDRAM*/
	msr	cpsr, r4	/*store cpsr */
	ldmia	r3!,{r4}		@ load CPSR from SDRAM
	msr	cpsr, r4		@ store cpsr

	/* Enabling MMU here */
	mrc	p15, 0, r7, c2, c0, 2 /* Read TTBRControl */
	mrc	p15, 0, r7, c2, c0, 2 	@ Read TTBRControl
	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
	and	r7, #0x7
	cmp	r7, #0x0
	beq	usettbr0
ttbr_error:
	/* More work needs to be done to support N[0:2] value other than 0
	/*
	 * More work needs to be done to support N[0:2] value other than 0
	 * So looping here so that the error can be detected
	 */
	b	ttbr_error
@@ -552,21 +554,25 @@ usettbr0:
	and	r2, r5
	mov	r4, pc
	ldr	r5, table_index_mask
	and	r4, r5 /* r4 = 31 to 20 bits of pc */
	and	r4, r5			@ r4 = 31 to 20 bits of pc
	/* Extract the value to be written to table entry */
	ldr	r1, table_entry
	add	r1, r1, r4 /* r1 has value to be written to table entry*/
	/* r1 has the value to be written to table entry*/
	add	r1, r1, r4
	/* Getting the address of table entry to modify */
	lsr	r4, #18
	add	r2, r4 /* r2 has the location which needs to be modified */
	/* r2 has the location which needs to be modified */
	add	r2, r4
	/* Storing previous entry of location being modified */
	ldr	r5, scratchpad_base
	ldr	r4, [r2]
	str	r4, [r5, #0xC0]
	/* Modify the table entry */
	str	r1, [r2]
	/* Storing address of entry being modified
	 * - will be restored after enabling MMU */
	/*
	 * Storing address of entry being modified
	 * - will be restored after enabling MMU
	 */
	ldr	r5, scratchpad_base
	str	r2, [r5, #0xC4]

@@ -575,8 +581,11 @@ usettbr0:
	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
	/* Restore control register  but dont enable caches here*/
	/* Caches will be enabled after restoring MMU table entry */
	/*
	 * Restore control register. This enables the MMU.
	 * The caches and prediction are not enabled here, they
	 * will be enabled after restoring the MMU table entry.
	 */
	ldmia	r3!, {r4}
	/* Store previous value of control register in scratchpad */
	str	r4, [r5, #0xC8]
@@ -655,7 +664,7 @@ ENTRY(es3_sdrc_fix_sz)
/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this. */
/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
@@ -672,20 +681,21 @@ wait_sdrc_ready:
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]
is_dll_in_lock_mode:

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
        bxne    lr
	bxne	lr			@ Return if locked
	/* wait till dll locks */
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1
	str	r4, wait_dll_lock_counter
	ldr	r4, sdrc_dlla_status
        mov	r6, #8		/* Wait 20uS for lock */
	/* Wait 20uS for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
@@ -693,17 +703,17 @@ wait_dll_lock:
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
        bx      lr
	bx	lr			@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)	/* disable dll */
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)	/* enable dll */
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter