Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cb6afcca authored by Ard Biesheuvel, committed by Sami Tolvanen
Browse files

UPSTREAM: arm64: merge __enable_mmu and __turn_mmu_on



Enabling of the MMU is split into two functions, with an align and
a branch in the middle. On arm64, the entire kernel Image is ID mapped
so this is really not necessary, and we can just merge it into a
single function.

Also replace an open-coded adrp/add pair referencing __enable_mmu
with a single adr_l.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>

Bug: 31432001
Change-Id: Ia6c427090c272a7985949744d09dc27f3654e032
(cherry picked from commit 8b0a95753a34b5c8b2e483e0e5b1d67761e32c5f)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
parent 65360b91
Loading
Loading
Loading
Loading
+7 −26
Original line number Diff line number Diff line
@@ -250,8 +250,7 @@ ENTRY(stext)
	 */
	ldr	x27, =__mmap_switched		// address to jump to after
						// MMU has been enabled
	adrp	lr, __enable_mmu		// return (PIC) address
	add	lr, lr, #:lo12:__enable_mmu
	adr_l	lr, __enable_mmu		// return (PIC) address
	b	__cpu_setup			// initialise processor
ENDPROC(stext)

@@ -612,11 +611,12 @@ ENTRY(__secondary_switched)
ENDPROC(__secondary_switched)

/*
 * Setup common bits before finally enabling the MMU. Essentially this is just
 * loading the page table pointer and vector base registers.
 * Enable the MMU.
 *
 * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
 * the MMU.
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x27 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
__enable_mmu:
	ldr	x5, =vectors
@@ -624,25 +624,6 @@ __enable_mmu:
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU. This completely changes the structure of the visible memory
 * space. You will not be able to trace execution through this.
 *
 *  x0  = system control register
 *  x27 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 *
 * We align the entire function to the smallest power of two larger than it to
 * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
 * close to the end of a 512MB or 1GB block we might require an additional
 * table to map the entire function.
 */
	.align	4
__turn_mmu_on:
	msr	sctlr_el1, x0
	isb
	/*
@@ -654,7 +635,7 @@ __turn_mmu_on:
	dsb	nsh
	isb
	br	x27
ENDPROC(__turn_mmu_on)
ENDPROC(__enable_mmu)

/*
 * Calculate the start of physical memory.