Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 90ec95cd authored by Steve Capper, committed by Will Deacon
Browse files

arm64: mm: Introduce VA_BITS_MIN



In order to support 52-bit kernel addresses detectable at boot time, the
kernel needs to know the most conservative VA_BITS possible should it
need to fall back to this quantity due to lack of hardware support.

A new compile time constant VA_BITS_MIN is introduced in this patch and
it is employed in the KASAN end address, KASLR, and EFI stub.

For Arm, if 52-bit VA support is unavailable the fallback is to 48 bits.

In other words: VA_BITS_MIN = min (48, VA_BITS)

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 99426e5e
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)

/*
 * On arm64, we have to ensure that the initrd ends up in the linear region,
 * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is
 * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
 * guaranteed to cover the kernel Image.
 *
 * Since the EFI stub is part of the kernel Image, we can relax the
@@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
						    unsigned long image_addr)
{
	return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1));
	return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
}

#define efi_call_early(f, ...)		sys_table_arg->boottime->f(__VA_ARGS__)
+8 −1
Original line number Diff line number Diff line
@@ -52,6 +52,13 @@
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
#if VA_BITS > 48
#define VA_BITS_MIN		(48)
#else
#define VA_BITS_MIN		(VA_BITS)
#endif
#define _VA_START(va)		(UL(0xffffffffffffffff) - \
				(UL(1) << ((va) - 1)) + 1)

#define KERNEL_START      _text
#define KERNEL_END        _end
@@ -74,7 +81,7 @@
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define KASAN_SHADOW_END	(VA_START)
#define KASAN_SHADOW_END	(_VA_START(VA_BITS_MIN))
#endif

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
+1 −1
Original line number Diff line number Diff line
@@ -42,7 +42,7 @@
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS)
#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64		(UL(1) << vabits_user)

#ifdef CONFIG_COMPAT
+1 −1
Original line number Diff line number Diff line
@@ -314,7 +314,7 @@ __create_page_tables:
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS
	mov	x5, #VA_BITS_MIN
1:
	adr_l	x6, vabits_user
	str	x5, [x6]
+3 −3
Original line number Diff line number Diff line
@@ -116,15 +116,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
	/*
	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
	 * kernel image offset from the seed. Let's place the kernel in the
	 * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
	 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
	 * the lower and upper quarters to avoid colliding with other
	 * allocations.
	 * Even if we could randomize at page granularity for 16k and 64k pages,
	 * let's always round to 2 MB so we don't interfere with the ability to
	 * map using contiguous PTEs
	 */
	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	offset = BIT(VA_BITS - 3) + (seed & mask);
	mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
	offset = BIT(VA_BITS_MIN - 3) + (seed & mask);

	/* use the top 16 bits to randomize the linear region */
	memstart_offset_seed = seed >> 48;
Loading