Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 77ad4ce6 authored by Mark Rutland, committed by Will Deacon
Browse files

arm64: memory: rename VA_START to PAGE_END



Prior to commit:

  14c127c9 ("arm64: mm: Flip kernel VA space")

... VA_START described the start of the TTBR1 address space for a given
VA size described by VA_BITS, where all kernel mappings began.

Since that commit, VA_START described a portion midway through the
address space, where the linear map ends and other kernel mappings
begin.

To avoid confusion, let's rename VA_START to PAGE_END, making it clear
that it's not the start of the TTBR1 address space and implying that
it's related to PAGE_OFFSET. Comments and other mnemonics are updated
accordingly, along with a typo fix in the description of VMEMMAP_SIZE.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 233947ef
Loading
Loading
Loading
Loading
+10 −10
Original line number Diff line number Diff line
@@ -28,20 +28,20 @@
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * neads to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to VA_START for 48-bit. This allows us to
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and "fallback" to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_SIZE ((_VA_START(VA_BITS_MIN) - PAGE_OFFSET) \
#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
			>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map (top
 *		 (VA_BITS - 1))
 * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
@@ -64,7 +64,7 @@
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _VA_START(va)		(-(UL(1) << ((va) - 1)))
#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))

#define KERNEL_START		_text
#define KERNEL_END		_end
@@ -87,7 +87,7 @@
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define KASAN_SHADOW_END	(_VA_START(VA_BITS_MIN))
#define KASAN_SHADOW_END	(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
@@ -173,7 +173,7 @@

#ifndef __ASSEMBLY__
extern u64			vabits_actual;
#define VA_START		(_VA_START(vabits_actual))
#define PAGE_END		(_PAGE_END(vabits_actual))

#include <linux/bitops.h>
#include <linux/mmdebug.h>
+2 −2
Original line number Diff line number Diff line
@@ -856,8 +856,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)
#define kc_vaddr_to_offset(v)	((v) & ~PAGE_END)
#define kc_offset_to_vaddr(o)	((o) | PAGE_END)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+1 −1
Original line number Diff line number Diff line
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, VA_START);
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		goto out;

+3 −3
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@

enum address_markers_idx {
	PAGE_OFFSET_NR = 0,
	VA_START_NR,
	PAGE_END_NR,
#ifdef CONFIG_KASAN
	KASAN_START_NR,
#endif
@@ -36,7 +36,7 @@ enum address_markers_idx {

static struct addr_marker address_markers[] = {
	{ PAGE_OFFSET,			"Linear Mapping start" },
	{ 0 /* VA_START */,		"Linear Mapping end" },
	{ 0 /* PAGE_END */,		"Linear Mapping end" },
#ifdef CONFIG_KASAN
	{ 0 /* KASAN_SHADOW_START */,	"Kasan shadow start" },
	{ KASAN_SHADOW_END,		"Kasan shadow end" },
@@ -411,7 +411,7 @@ void ptdump_check_wx(void)

static int ptdump_init(void)
{
	address_markers[VA_START_NR].start_address = VA_START;
	address_markers[PAGE_END_NR].start_address = PAGE_END;
#ifdef CONFIG_KASAN
	address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif
+1 −1
Original line number Diff line number Diff line
@@ -226,7 +226,7 @@ void __init kasan_init(void)
	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *) VA_START),
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				   (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				   (void *)KASAN_SHADOW_END);
Loading