Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f357e81b authored by Ard Biesheuvel's avatar Ard Biesheuvel Committed by Sami Tolvanen
Browse files

UPSTREAM: arm64: mm: increase VA range of identity map



The page size and the number of translation levels, and hence the supported
virtual address range, are build-time configurables on arm64 whose optimal
values are use case dependent. However, in the current implementation, if
the system's RAM is located at a very high offset, the virtual address range
needs to reflect that merely because the identity mapping, which is only used
to enable or disable the MMU, requires the extended virtual range to map the
physical memory at an equal virtual offset.

This patch relaxes that requirement, by increasing the number of translation
levels for the identity mapping only, and only when actually needed, i.e.,
when system RAM's offset is found to be out of reach at runtime.

Tested-by: Laura Abbott <lauraa@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>

Bug: 31432001
Change-Id: I7635036e3413e59ef36dc4bf67527b873bafa446
(cherry picked from commit dd006da21646f1c86f0242eb8f527d093303127a)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
parent 28338b7c
Loading
Loading
Loading
Loading
+43 −0
Original line number Original line Diff line number Diff line
@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
	: "r" (ttbr));
	: "r" (ttbr));
}
}


/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;

static inline bool __cpu_uses_extended_idmap(void)
{
	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}

/*
 * Program the TCR_EL1.T0SZ field with @t0sz. Skipped entirely unless the
 * extended ID map is in use, in which case T0SZ must be toggled between
 * the default value and the (smaller) idmap value around MMU transitions.
 */
static inline void __cpu_set_tcr_t0sz(u64 t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	/* read-modify-write of the T0SZ field, made visible with an ISB */
	asm volatile (
	"	mrs	%0, tcr_el1	;"
	"	bfi	%0, %1, %2, %3	;"
	"	msr	tcr_el1, %0	;"
	"	isb"
	: "=&r" (tcr)
	: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}

/*
 * Set TCR.T0SZ to the value appropriate for activating the identity map.
 * idmap_t0sz may be smaller than TCR_T0SZ(VA_BITS) when system RAM lies
 * beyond the reach of the default VA range (see comment above idmap_t0sz).
 */
static inline void cpu_set_idmap_tcr_t0sz(void)
{
	__cpu_set_tcr_t0sz(idmap_t0sz);
}

/*
 * Set TCR.T0SZ to its default value (based on VA_BITS). Restores the normal
 * translation regime after the extended ID map is no longer needed; a no-op
 * when the default T0SZ is already in effect.
 */
static inline void cpu_set_default_tcr_t0sz(void)
{
	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
}

static inline void switch_new_context(struct mm_struct *mm)
static inline void switch_new_context(struct mm_struct *mm)
{
{
	unsigned long flags;
	unsigned long flags;
+4 −2
Original line number Original line Diff line number Diff line
@@ -39,7 +39,9 @@
 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
 * map the kernel. With the 64K page configuration, swapper and idmap need to
 * map the kernel. With the 64K page configuration, swapper and idmap need to
 * map to pte level. The swapper also maps the FDT (see __create_page_tables
 * map to pte level. The swapper also maps the FDT (see __create_page_tables
 * for more information).
 * for more information). Note that the number of ID map translation levels
 * could be increased on the fly if system RAM is out of reach for the default
 * VA range, so 3 pages are reserved in all cases.
 */
 */
#ifdef CONFIG_ARM64_64K_PAGES
#ifdef CONFIG_ARM64_64K_PAGES
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -48,7 +50,7 @@
#endif
#endif


#define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
#define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(SWAPPER_DIR_SIZE)
#define IDMAP_DIR_SIZE		(3 * PAGE_SIZE)


#ifndef __ASSEMBLY__
#ifndef __ASSEMBLY__


+6 −1
Original line number Original line Diff line number Diff line
@@ -142,7 +142,12 @@
/*
/*
 * TCR flags.
 * TCR flags.
 */
 */
#define TCR_TxSZ(x)		(((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
#define TCR_T0SZ_OFFSET		0	/* bit offset of T0SZ in TCR_EL1 */
#define TCR_T1SZ_OFFSET		16	/* bit offset of T1SZ in TCR_EL1 */
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)	/* T0SZ for x VA bits */
#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)	/* T1SZ for x VA bits */
#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))	/* both fields for x VA bits */
#define TCR_TxSZ_WIDTH		6	/* width in bits of each TxSZ field */
#define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
#define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
#define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
#define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
#define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
#define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
+37 −0
Original line number Original line Diff line number Diff line
@@ -378,6 +378,43 @@ __create_page_tables:
	 */
	 */
	mov	x0, x25				// idmap_pg_dir
	mov	x0, x25				// idmap_pg_dir
	adrp	x3, KERNEL_START		// __pa(KERNEL_START)
	adrp	x3, KERNEL_START		// __pa(KERNEL_START)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of KERNEL_END.
	 */
	adrp	x5, KERNEL_END
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	str_l	x5, idmap_t0sz, x6

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

	create_pgd_entry x0, x3, x5, x6
	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(KERNEL_START)
	mov	x5, x3				// __pa(KERNEL_START)
	adr_l	x6, KERNEL_END			// __pa(KERNEL_END)
	adr_l	x6, KERNEL_END			// __pa(KERNEL_END)
+1 −0
Original line number Original line Diff line number Diff line
@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void)
	 */
	 */
	cpu_set_reserved_ttbr0();
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	flush_tlb_all();
	cpu_set_default_tcr_t0sz();


	preempt_disable();
	preempt_disable();
	trace_hardirqs_off();
	trace_hardirqs_off();
Loading