
Commit 09e61a77 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm: Make __VIRTUAL_MASK_SHIFT dynamic



For boot-time switching between paging modes, we need to be able to
adjust virtual mask shifts.

The change doesn't affect the kernel image size much:

   text	   data	    bss	    dec	    hex	filename
8628892	4734340	1368064	14731296	 e0c820	vmlinux.before
8628966	4734340	1368064	14731370	 e0c86a	vmlinux.after

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214111656.88514-9-kirill.shutemov@linux.intel.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 162434e7
arch/x86/entry/entry_64.S +12 −0
@@ -274,8 +274,20 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 	 * Change top bits to match most significant bit (47th or 56th bit
 	 * depending on paging mode) in the address.
 	 */
+#ifdef CONFIG_X86_5LEVEL
+	testl	$1, pgtable_l5_enabled(%rip)
+	jz	1f
+	shl	$(64 - 57), %rcx
+	sar	$(64 - 57), %rcx
+	jmp	2f
+1:
+	shl	$(64 - 48), %rcx
+	sar	$(64 - 48), %rcx
+2:
+#else
 	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+#endif
 
 	/* If this changed %rcx, it was not canonical */
 	cmpq	%rcx, %r11
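For reference, a minimal userspace C sketch of the sign-extension trick the shl/sar pairs implement (is_canonical() and vaddr_bits are illustrative names, not kernel code; vaddr_bits corresponds to __VIRTUAL_MASK_SHIFT + 1, i.e. 48 or 57):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Shifting left and then arithmetic-shifting right by (64 - vaddr_bits)
 * replicates the most significant implemented bit into the top bits,
 * i.e. it canonicalizes the address. Relies on arithmetic right shift
 * of signed values, which gcc/clang guarantee on x86-64.
 */
static bool is_canonical(uint64_t addr, unsigned int vaddr_bits)
{
	int64_t sext = (int64_t)(addr << (64 - vaddr_bits)) >> (64 - vaddr_bits);

	/* "If this changed %rcx, it was not canonical" */
	return (uint64_t)sext == addr;
}

int main(void)
{
	printf("%d\n", is_canonical(0x00007fffffffffffULL, 48));	/* 1 */
	printf("%d\n", is_canonical(0x0000800000000000ULL, 48));	/* 0 */
	printf("%d\n", is_canonical(0xffff800000000000ULL, 48));	/* 1 */
	printf("%d\n", is_canonical(0xffff800000000000ULL, 57));	/* 1 */
	return 0;
}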
arch/x86/include/asm/page_64_types.h +1 −1
@@ -56,7 +56,7 @@
 #define __PHYSICAL_MASK_SHIFT	52
 
 #ifdef CONFIG_X86_5LEVEL
-#define __VIRTUAL_MASK_SHIFT	56
+#define __VIRTUAL_MASK_SHIFT	(pgtable_l5_enabled ? 56 : 47)
 #else
 #define __VIRTUAL_MASK_SHIFT	47
 #endif
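With this hunk, __VIRTUAL_MASK_SHIFT is no longer a preprocessor constant under CONFIG_X86_5LEVEL, so it cannot appear in #if tests or in assembly, which is why the entry_64.S hunk above open-codes both widths. A userspace sketch of how the dynamic shift propagates to the derived mask (names modeled on the kernel's pgtable_l5_enabled and __VIRTUAL_MASK, but simplified; an assumption, not the kernel's actual definitions):

#include <stdio.h>

static int pgtable_l5_enabled;	/* set once during early boot in the kernel */

#define VIRTUAL_MASK_SHIFT	(pgtable_l5_enabled ? 56 : 47)
#define VIRTUAL_MASK		((1UL << VIRTUAL_MASK_SHIFT) - 1)

int main(void)
{
	pgtable_l5_enabled = 0;
	printf("4-level: shift %d, mask %#018lx\n",
	       VIRTUAL_MASK_SHIFT, VIRTUAL_MASK);	/* 47, 0x00007fffffffffff */

	pgtable_l5_enabled = 1;
	printf("5-level: shift %d, mask %#018lx\n",
	       VIRTUAL_MASK_SHIFT, VIRTUAL_MASK);	/* 56, 0x00ffffffffffffff */
	return 0;
}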
arch/x86/mm/dump_pagetables.c +10 −2
@@ -85,8 +85,12 @@ static struct addr_marker address_markers[] = {
 	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
 	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
 #ifdef CONFIG_KASAN
-	[KASAN_SHADOW_START_NR]	= { KASAN_SHADOW_START,	"KASAN shadow" },
-	[KASAN_SHADOW_END_NR]	= { KASAN_SHADOW_END,	"KASAN shadow end" },
+	/*
+	 * These fields get initialized with the (dynamic)
+	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
+	 */
+	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
+	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
 #endif
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	[LDT_NR]		= { 0UL,		"LDT remap" },
@@ -571,6 +575,10 @@ static int __init pt_dump_init(void)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
 #endif
+#ifdef CONFIG_KASAN
+	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+#endif
 #endif
 #ifdef CONFIG_X86_32
 	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
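With KASAN_SHADOW_START and KASAN_SHADOW_END now depending on the boot-selected paging mode, they are no longer compile-time constants and cannot appear in static initializers, so the markers are filled in at runtime by pt_dump_init(). A minimal sketch of that pattern (SHADOW_START, init_markers() and the constants are hypothetical stand-ins, not kernel values):

#include <stdio.h>

static int l5_enabled;				/* runtime, not constant */
#define SHADOW_START	(l5_enabled ? 0xffdfUL : 0xfec0UL)

/* static unsigned long marker = SHADOW_START;	   would fail to build:
 * "initializer element is not constant"
 */
static unsigned long marker;			/* 0UL placeholder, as above */

static void init_markers(void)			/* plays the pt_dump_init() role */
{
	marker = SHADOW_START;
}

int main(void)
{
	l5_enabled = 1;
	init_markers();
	printf("%#lx\n", marker);
	return 0;
}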
arch/x86/mm/kaslr.c +3 −1
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
 	unsigned long *base;
 	unsigned long size_tb;
 } kaslr_regions[] = {
-	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
+	{ &page_offset_base, 0 },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
 	{ &vmemmap_base, 1 },
 };
@@ -93,6 +93,8 @@ void __init kernel_randomize_memory(void)
 	if (!kaslr_memory_enabled())
 		return;
 
+	kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+
 	/*
 	 * Update Physical memory mapping to available and
 	 * add padding if needed (especially for memory hotplug support).
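The kaslr.c hunks move the maximum-size computation for the direct-mapping region from a static initializer to runtime in kernel_randomize_memory(); the arithmetic itself is unchanged. As a standalone sketch of that arithmetic (macro names shortened, values taken from the headers above; TB_SHIFT = 40 is the kernel's terabyte shift):

#include <stdio.h>

#define PHYSICAL_MASK_SHIFT	52	/* __PHYSICAL_MASK_SHIFT above */
#define TB_SHIFT		40	/* 1 TB == 1UL << 40 bytes */

int main(void)
{
	/* 1 << (52 - 40) == 4096 TB of maximum direct-mapping space */
	printf("%d TB\n", 1 << (PHYSICAL_MASK_SHIFT - TB_SHIFT));
	return 0;
}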