Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4aada93e authored by Laura Abbott
Browse files

arm64: Move cpu_resume_mmu to a head.S section



When turning on the MMU, there needs to be an identity
mapping of the current location as an intermediate
step. When CONFIG_STRICT_RWX is enabled, the .text
section gets pushed outside the original identity mapping.
Create an identity mapping for cpu_resume_mmu to ensure
it is always mapped.

Change-Id: I23f526e2828da548e273f266f15a303a4176a76d
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent a1fd97c5
Loading
Loading
Loading
Loading
+30 −0
Original line number Diff line number Diff line
@@ -307,6 +307,13 @@ __create_page_tables:
	create_pgd_entry x25, x0, x3, x5, x6
	create_block_map x0, x7, x3, x5, x5, idmap=1

	/*
	 * adrp is fine here, we don't really care about the lower order bits
	 */
	adrp	x3, cpu_resume_mmu
	create_pgd_entry x25, x0, x3, x5, x6
	create_block_map x0, x7, x3, x5, x5, idmap=1

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
@@ -519,6 +526,29 @@ ENTRY(__secondary_switched)
ENDPROC(__secondary_switched)
#endif	/* CONFIG_SMP */

	.align 3
/*
 * Enable the MMU and jump to the kernel's virtual mapping on resume.
 *
 * x0 must contain the sctlr_el1 value retrieved from the restored
 * context. This routine executes from an identity mapping (an idmap
 * entry is created for it in __create_page_tables above), so the
 * virtual address of cpu_resume_after_mmu is loaded before the MMU
 * state changes.
 */
ENTRY(cpu_resume_mmu)
	ldr	x3, =cpu_resume_after_mmu	// virtual-address target, from the literal pool
	msr	sctlr_el1, x0		// restore sctlr_el1
	isb				// synchronize the sctlr_el1 write before branching
	br	x3			// global jump to virtual address
ENDPROC(cpu_resume_mmu)

/*
 * Entered via the br in cpu_resume_mmu, once running at the kernel's
 * virtual address. Restores the callee-saved registers from the
 * 96-byte stack frame (presumably the one laid down by __cpu_suspend
 * in sleep.S -- confirm against that file) and returns 0 for success.
 * (Note: the previous comment here described cpu_resume_mmu's x0
 * input; this routine overwrites x0 as its return value.)
 */
cpu_resume_after_mmu:
	mov	x0, #0			// return zero on success
	ldp	x19, x20, [sp, #16]	// restore callee-saved pairs x19..x28
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96	// restore fp/lr and pop the 96-byte frame
	ret
ENDPROC(cpu_resume_after_mmu)


/*
 * Setup common bits before finally enabling the MMU. Essentially this is just
 * loading the page table pointer and vector base registers.
+0 −20
Original line number Diff line number Diff line
@@ -106,26 +106,6 @@ ENTRY(__cpu_suspend)
ENDPROC(__cpu_suspend)
	.ltorg

/*
 * x0 must contain the sctlr value retrieved from restored context.
 * Writing x0 to sctlr_el1 turns the MMU back on; the virtual address
 * of cpu_resume_after_mmu is loaded first so the branch lands in the
 * kernel's virtual mapping.
 */
ENTRY(cpu_resume_mmu)
	ldr	x3, =cpu_resume_after_mmu	// virtual-address target, from the literal pool
	msr	sctlr_el1, x0		// restore sctlr_el1
	isb				// ensure the sctlr_el1 write takes effect before br
	br	x3			// global jump to virtual address
ENDPROC(cpu_resume_mmu)
/*
 * Runs at the virtual address after cpu_resume_mmu has enabled the
 * MMU: restore the callee-saved registers from the 96-byte stack
 * frame (presumably saved by __cpu_suspend above -- confirm) and
 * report success to the caller.
 */
cpu_resume_after_mmu:
	mov	x0, #0			// return zero on success
	ldp	x19, x20, [sp, #16]	// restore callee-saved pairs x19..x28
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96	// restore fp/lr; pop the whole frame
	ret
ENDPROC(cpu_resume_after_mmu)

ENTRY(cpu_resume)
	bl	el2_setup		// if in EL2 drop to EL1 cleanly
#ifdef CONFIG_SMP