Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 91f606a8 authored by Kirill A. Shutemov's avatar Kirill A. Shutemov Committed by Ingo Molnar
Browse files

x86/mm: Replace compile-time checks for 5-level paging with runtime checks



This patch converts the remaining CONFIG_X86_5LEVEL compile-time checks to
runtime checks for p4d folding.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214182542.69302-9-kirill.shutemov@linux.intel.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 98219dda
Loading
Loading
Loading
Loading
+10 −13
Original line number Diff line number Diff line
@@ -217,29 +217,26 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)

/*
 * Install a p4d entry.
 *
 * NOTE(review): this span is a rendered diff that interleaves the removed
 * compile-time #if/#else/#endif lines with the added runtime
 * pgtable_l5_enabled check, so it is not compilable as shown — consult the
 * applied commit for the final form.
 */
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
	p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
#else
	pgd_t pgd;

	/* 5-level paging, or no PTI: a p4d is a real entry; plain store. */
	if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
		*p4dp = p4d;
#endif
		return;
	}

	/*
	 * 4-level + PTI: the p4d is folded into the pgd, so route the write
	 * through pti_set_user_pgd() to mirror it into the user page table.
	 */
	pgd = native_make_pgd(p4d_val(p4d));
	pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
	*p4dp = native_make_p4d(pgd_val(pgd));
}

/*
 * Clear a p4d entry by writing a zero entry through native_set_p4d(), so
 * that any PTI user-pagetable handling done there applies to clears too.
 */
static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	/* Real p4d level: the zero entry is built as a p4d value. */
	native_set_p4d(p4d, native_make_p4d(0));
#else
	/* p4d folded into pgd: the zero entry is built as a pgd value. */
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}

/*
 * Install a pgd entry.  With PAGE_TABLE_ISOLATION the write goes through
 * pti_set_user_pgd() so the user-space copy of the page tables is kept in
 * sync; otherwise it is a plain store.
 */
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	*pgdp = pti_set_user_pgd(pgdp, pgd);
#else
	*pgdp = pgd;
#endif
}

static inline void native_pgd_clear(pgd_t *pgd)
+1 −3
Original line number Diff line number Diff line
@@ -348,9 +348,7 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
#ifdef CONFIG_X86_5LEVEL
	    __pa(pt) == __pa(kasan_zero_p4d) ||
#endif
	    (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) ||
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
		note_page(m, st, __pgprot(prot), 5);
+2 −2
Original line number Diff line number Diff line
@@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address)
	if (pgd_none(*pgd_ref))
		return -1;

	if (CONFIG_PGTABLE_LEVELS > 4) {
	if (pgtable_l5_enabled) {
		if (pgd_none(*pgd)) {
			set_pgd(pgd, *pgd_ref);
			arch_flush_lazy_mmu_mode();
@@ -454,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
	if (p4d_none(*p4d_ref))
		return -1;

	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
		set_p4d(p4d, *p4d_ref);
		arch_flush_lazy_mmu_mode();
	} else {
+1 −1
Original line number Diff line number Diff line
@@ -120,7 +120,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		if (pgtable_l5_enabled) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
+18 −12
Original line number Diff line number Diff line
@@ -88,12 +88,7 @@ static int __init nonx32_setup(char *str)
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory was added make sure all the processes MM have
 * suitable PGD entries in the local PGD level page.
 */
#ifdef CONFIG_X86_5LEVEL
void sync_global_pgds(unsigned long start, unsigned long end)
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

@@ -129,8 +124,8 @@ void sync_global_pgds(unsigned long start, unsigned long end)
		spin_unlock(&pgd_lock);
	}
}
#else
void sync_global_pgds(unsigned long start, unsigned long end)

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

@@ -173,7 +168,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
		spin_unlock(&pgd_lock);
	}
}
#endif

/*
 * When memory is added, make sure all processes' mm have suitable entries
 * in their local top-level (PGD) page.  Dispatches at runtime on the
 * paging depth, since 5-level and 4-level kernels must synchronize at
 * different page-table levels.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled)
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls __init function
@@ -632,7 +638,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = p4d_index(vaddr);

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
	if (!pgtable_l5_enabled)
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);

	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
@@ -712,7 +718,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
		if (pgtable_l5_enabled)
			pgd_populate(&init_mm, pgd, p4d);
		else
			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
@@ -1093,7 +1099,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (CONFIG_PGTABLE_LEVELS == 5)
		if (pgtable_l5_enabled)
			free_pud_table(pud_base, p4d, altmap);
	}

Loading