Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f80fb3a3 authored by Ard Biesheuvel, committed by Catalin Marinas
Browse files

arm64: add support for kernel ASLR



This adds support for KASLR, based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.

If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.

If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted).

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 1e48ef7f
Loading
Loading
Loading
Loading
+29 −0
Original line number Diff line number Diff line
@@ -798,6 +798,35 @@ config RELOCATABLE
	  relocation pass at runtime even if the kernel is loaded at the
	  same address it was linked at.

# KASLR: randomize the kernel image's virtual load address. Entropy comes
# from the bootloader via the /chosen/kaslr-seed DT property (see help
# text below); requires a relocatable kernel and module PLT support,
# hence the two selects.
config RANDOMIZE_BASE
	bool "Randomize the address of the kernel image"
	select ARM64_MODULE_PLTS
	select RELOCATABLE
	help
	  Randomizes the virtual address at which the kernel image is
	  loaded, as a security feature that deters exploit attempts
	  relying on knowledge of the location of kernel internals.

	  It is the bootloader's job to provide entropy, by passing a
	  random u64 value in /chosen/kaslr-seed at kernel entry.

	  If unsure, say N.

# Trade-off knob for KASLR'd modules: when set (the default), the module
# region is placed independently of the kernel image, so module-to-kernel
# calls go through PLT veneers; when unset, modules stay within relative
# branch range of the kernel text.
config RANDOMIZE_MODULE_REGION_FULL
	bool "Randomize the module region independently from the core kernel"
	depends on RANDOMIZE_BASE
	default y
	help
	  Randomizes the location of the module region without considering the
	  location of the core kernel. This way, it is impossible for modules
	  to leak information about the location of core kernel data structures
	  but it does imply that function calls between modules and the core
	  kernel will need to be resolved via veneers in the module PLT.

	  When this option is not set, the module region will be randomized over
	  a limited range that contains the [_stext, _etext] interval of the
	  core kernel, so branch relocations are always in range.

endmenu

menu "Boot options"
+4 −1
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VSIZE		(SZ_64M)
#define MODULES_VSIZE		(SZ_128M)
#define PCI_IO_END		(PAGE_OFFSET - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
@@ -139,6 +139,9 @@ extern phys_addr_t memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

+6 −0
Original line number Diff line number Diff line
@@ -31,4 +31,10 @@ struct mod_arch_specific {
u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
#endif

#endif /* __ASM_MODULE_H */
+1 −0
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@ arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI)		+= acpi.o
arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o

obj-y					+= $(arm64-obj-y) vdso/
obj-m					+= $(arm64-obj-m)
+51 −8
Original line number Diff line number Diff line
@@ -210,6 +210,7 @@ section_table:
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	mov	x23, xzr			// KASLR offset, defaults to 0
	adrp	x24, __PHYS_OFFSET
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
@@ -313,7 +314,7 @@ ENDPROC(preserve_boot_args)
__create_page_tables:
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	mov	x27, lr
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
@@ -392,6 +393,7 @@ __create_page_tables:
	 */
	mov	x0, x26				// swapper_pg_dir
	ldr	x5, =KIMAGE_VADDR
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	ldr	w6, kernel_img_size
	add	x6, x6, x5
@@ -408,8 +410,7 @@ __create_page_tables:
	dmb	sy
	bl	__inval_cache_range

	mov	lr, x27
	ret
	ret	x28
ENDPROC(__create_page_tables)

kernel_img_size:
@@ -421,6 +422,7 @@ kernel_img_size:
 */
	.set	initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
	mov	x28, lr				// preserve LR
	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb
@@ -449,19 +451,26 @@ __mmap_switched:
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	1f
	str	x13, [x11]
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b

1:	cmp	w12, #R_AARCH64_ABS64
	b.ne	0b
	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
	add	x14, x15, x23			// relocate
	csel	x15, x14, x15, ne
	add	x15, x13, x15
	str	x15, [x11]
	str	x15, [x11, x23]
	b	0b

2:
2:	adr_l	x8, kimage_vaddr		// make relocated kimage_vaddr
	dc	cvac, x8			// value visible to secondaries
	dsb	sy				// with MMU off
#endif

	adr_l	sp, initial_sp, x4
@@ -470,13 +479,23 @@ __mmap_switched:
	msr	sp_el0, x4			// Save thread_info
	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr	x4, =KIMAGE_VADDR		// Save the offset between
	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x24			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x29, #0
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	cbnz	x23, 0f				// already running randomized?
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	mov	x23, x0				// record KASLR offset
	ret	x28				// we must enable KASLR, return
						// to __enable_mmu()
0:
#endif
	b	start_kernel
ENDPROC(__mmap_switched)
@@ -486,6 +505,10 @@ ENDPROC(__mmap_switched)
 * hotplug and needs to have the same protections as the text region
 */
	.section ".text","ax"

ENTRY(kimage_vaddr)
	.quad		_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
@@ -651,7 +674,7 @@ ENTRY(secondary_startup)
	adrp	x26, swapper_pg_dir
	bl	__cpu_setup			// initialise processor

	ldr	x8, =KIMAGE_VADDR
	ldr	x8, kimage_vaddr
	ldr	w9, 0f
	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
	b	__enable_mmu
@@ -684,6 +707,7 @@ ENDPROC(__secondary_switched)
 */
	.section	".idmap.text", "ax"
__enable_mmu:
	mrs	x18, sctlr_el1			// preserve old SCTLR_EL1 value
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -701,6 +725,25 @@ __enable_mmu:
	ic	iallu
	dsb	nsh
	isb
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	blr	x27

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x18			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	ialluis				// flush instructions fetched
	isb					// via old mapping
	add	x27, x27, x23			// relocated __mmap_switched
#endif
	br	x27
ENDPROC(__enable_mmu)

Loading