Commit 8b0a9575 authored by Ard Biesheuvel, committed by Will Deacon

arm64: merge __enable_mmu and __turn_mmu_on



Enabling of the MMU is split into two functions, with an align and
a branch in the middle. On arm64, the entire kernel Image is ID mapped,
so this is really not necessary, and we can just merge the two into a
single function.
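
As a sketch, the merged tail of __enable_mmu (taken from the new side of
the diff below) writes SCTLR_EL1 and branches straight to the virtual
entry point; because the whole Image is ID mapped, no .align is needed
to keep it inside a single block mapping:

	msr	sctlr_el1, x0	// SCTLR_EL1 value with the MMU bit set
	isb			// synchronise; the PC still holds an
				// ID-mapped (physical) address
	br	x27		// jump to the *virtual* address in x27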

Also replaces an open-coded adrp/add pair referencing __enable_mmu
with adr_l.
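
For reference, adr_l is an assembler macro (arm64 asm/assembler.h) that
expands to the same adrp/add pattern, so the two forms below are
equivalent:

	// open coded: adrp yields the 4 KB page containing the symbol,
	// add folds in the low 12 bits of its offset within that page
	adrp	lr, __enable_mmu
	add	lr, lr, #:lo12:__enable_mmu

	// equivalent, via the macro
	adr_l	lr, __enable_mmu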

Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent b1c98297
+7 −26
@@ -255,8 +255,7 @@ ENTRY(stext)
 	 */
 	ldr	x27, =__mmap_switched		// address to jump to after
 						// MMU has been enabled
-	adrp	lr, __enable_mmu		// return (PIC) address
-	add	lr, lr, #:lo12:__enable_mmu
+	adr_l	lr, __enable_mmu		// return (PIC) address
 	b	__cpu_setup			// initialise processor
 ENDPROC(stext)
 
@@ -615,11 +614,12 @@ ENDPROC(__secondary_switched)
 #endif	/* CONFIG_SMP */
 
 /*
- * Setup common bits before finally enabling the MMU. Essentially this is just
- * loading the page table pointer and vector base registers.
+ * Enable the MMU.
  *
- * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
- * the MMU.
+ *  x0  = SCTLR_EL1 value for turning on the MMU.
+ *  x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
  */
 __enable_mmu:
 	ldr	x5, =vectors
@@ -627,29 +627,10 @@ __enable_mmu:
 	msr	ttbr0_el1, x25			// load TTBR0
 	msr	ttbr1_el1, x26			// load TTBR1
 	isb
-	b	__turn_mmu_on
-ENDPROC(__enable_mmu)
-
-/*
- * Enable the MMU. This completely changes the structure of the visible memory
- * space. You will not be able to trace execution through this.
- *
- *  x0  = system control register
- *  x27 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- *
- * We align the entire function to the smallest power of two larger than it to
- * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
- * close to the end of a 512MB or 1GB block we might require an additional
- * table to map the entire function.
- */
-	.align	4
-__turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
 	br	x27
-ENDPROC(__turn_mmu_on)
+ENDPROC(__enable_mmu)
 
 /*
  * Calculate the start of physical memory.