Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a7f8de16 authored by Ard Biesheuvel, committed by Catalin Marinas
Browse files

arm64: allow kernel Image to be loaded anywhere in physical memory



This relaxes the kernel Image placement requirements, so that it
may be placed at any 2 MB aligned offset in physical memory.

This is accomplished by ignoring PHYS_OFFSET when installing
memblocks, and accounting for the apparent virtual offset of
the kernel Image. As a result, virtual address references
below PAGE_OFFSET are correctly mapped onto physical references
into the kernel Image regardless of where it sits in memory.

Special care needs to be taken for dealing with memory limits passed
via mem=, since the generic implementation clips memory top down, which
may clip the kernel image itself if it is loaded high up in memory. To
deal with this case, we simply add back the memory covering the kernel
image, which may result in more memory being retained than was passed
as a mem= parameter.

Since mem= should not be considered a production feature, a panic notifier
handler is installed that dumps the memory limit at panic time if one was
set.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent a89dea58
Loading
Loading
Loading
Loading
+13 −7
Original line number Diff line number Diff line
@@ -109,7 +109,13 @@ Header notes:
			1 - 4K
			2 - 16K
			3 - 64K
  Bits 3-63:	Reserved.
  Bit 3:	Kernel physical placement
			0 - 2MB aligned base should be as close as possible
			    to the base of DRAM, since memory below it is not
			    accessible via the linear mapping
			1 - 2MB aligned base may be anywhere in physical
			    memory
  Bits 4-63:	Reserved.

- When image_size is zero, a bootloader should attempt to keep as much
  memory as possible free for use by the kernel immediately after the
@@ -117,14 +123,14 @@ Header notes:
  depending on selected features, and is effectively unbound.

The Image must be placed text_offset bytes from a 2MB aligned base
address near the start of usable system RAM and called there. Memory
below that base address is currently unusable by Linux, and therefore it
is strongly recommended that this location is the start of system RAM.
The region between the 2 MB aligned base address and the start of the
image has no special significance to the kernel, and may be used for
other purposes.
address anywhere in usable system RAM and called there. The region
between the 2 MB aligned base address and the start of the image has no
special significance to the kernel, and may be used for other purposes.
At least image_size bytes from the start of the image must be free for
use by the kernel.
NOTE: versions prior to v4.6 cannot make use of memory below the
physical offset of the Image so it is recommended that the Image be
placed as close as possible to the start of system RAM.

Any memory described to the kernel (even that below the start of the
image) which is not marked as reserved from the kernel (e.g., with a
+6 −0
Original line number Diff line number Diff line
@@ -11,4 +11,10 @@
#define MIN_FDT_ALIGN		8
#define MAX_FDT_SIZE		SZ_2M

/*
 * arm64 requires the kernel image to placed
 * TEXT_OFFSET bytes beyond a 2 MB aligned base
 */
#define MIN_KIMG_ALIGN		SZ_2M

#endif
+12 −0
Original line number Diff line number Diff line
@@ -79,5 +79,17 @@
#define SWAPPER_MM_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define ARM64_MEMSTART_ALIGN	SZ_512M
#else
#define ARM64_MEMSTART_ALIGN	SZ_1G
#endif

#endif	/* __ASM_KERNEL_PGTABLE_H */
+1 −16
Original line number Diff line number Diff line
@@ -26,24 +26,9 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
#define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)

#define kvm_ksym_ref(sym)		((void *)&sym + kvm_ksym_shift)
#define kvm_ksym_ref(sym)		phys_to_virt((u64)&sym - kimage_voffset)

#ifndef __ASSEMBLY__
#if __GNUC__ > 4
#define kvm_ksym_shift			(PAGE_OFFSET - KIMAGE_VADDR)
#else
/*
 * GCC versions 4.9 and older will fold the constant below into the addend of
 * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
 * constant is used directly. However, since we use the small code model for
 * the core kernel, the reference to 'sym' will be emitted as a adrp/add pair,
 * with a +/- 4 GB range, resulting in linker relocation errors if the shift
 * is sufficiently large. So prevent the compiler from folding the shift into
 * the addend, by making the shift a variable with external linkage.
 */
__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
#endif

struct kvm;
struct kvm_vcpu;

+10 −8
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/sizes.h>

/*
@@ -88,10 +89,10 @@
#define __virt_to_phys(x) ({						\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__x >= PAGE_OFFSET ? (__x - PAGE_OFFSET + PHYS_OFFSET) :	\
			     (__x - KIMAGE_VADDR + PHYS_OFFSET); })
			     (__x - kimage_voffset); })

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
#define __phys_to_kimg(x)	((unsigned long)((x) - PHYS_OFFSET + KIMAGE_VADDR))
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
@@ -133,15 +134,16 @@

extern phys_addr_t		memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ memstart_addr; })
#define PHYS_OFFSET		({ BUG_ON(memstart_addr & 1); memstart_addr; })

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

/*
 * The maximum physical address that the linear direct mapping
 * of system RAM can cover. (PAGE_OFFSET can be interpreted as
 * a 2's complement signed quantity and negated to derive the
 * maximum size of the linear mapping.)
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
Loading