Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b9952ec7, authored by Kirill A. Shutemov, committed by Ingo Molnar
Browse files

x86/xen: Allow XEN_PV and XEN_PVH to be enabled with X86_5LEVEL



With boot-time switching between paging modes, XEN_PV and XEN_PVH can be
booted into 4-level paging mode.

Tested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180216114948.68868-2-kirill.shutemov@linux.intel.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6657fca0
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -38,12 +38,12 @@
 *
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE_L4)
PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
#endif
L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
@@ -386,9 +386,9 @@ NEXT_PAGE(early_dynamic_pgts)
#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
NEXT_PGD_PAGE(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + PGD_START_KERNEL*8, 0
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
+0 −5
Original line number Diff line number Diff line
@@ -18,9 +18,6 @@ config XEN_PV
	bool "Xen PV guest support"
	default y
	depends on XEN
	# XEN_PV is not ready to work with 5-level paging.
	# Changes to hypervisor are also required.
	depends on !X86_5LEVEL
	select XEN_HAVE_PVMMU
	select XEN_HAVE_VPMU
	help
@@ -79,6 +76,4 @@ config XEN_DEBUG_FS
config XEN_PVH
	bool "Support for running as a PVH guest"
	depends on XEN && XEN_PVHVM && ACPI
	# Pre-built page tables are not ready to handle 5-level paging.
	depends on !X86_5LEVEL
	def_bool n
+21 −0
Original line number Diff line number Diff line
@@ -538,6 +538,22 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

#if CONFIG_PGTABLE_LEVELS >= 5
/*
 * Read accessor for p4d (4th-level page-table) entries under Xen PV.
 * Translates the entry's frame number via pte_mfn_to_pfn() — presumably
 * machine frame number (MFN) to pseudo-physical frame number (PFN), per
 * the helper's name — so the kernel sees a native-looking value.
 * Registered as the .p4d_val pv_mmu_ops callback; the THUNK below
 * generates the callee-save-register wrapper the paravirt call sites use.
 */
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
	return pte_mfn_to_pfn(p4d.p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);

/*
 * Build a p4d entry from a raw value under Xen PV: convert the frame
 * number via pte_pfn_to_mfn() — presumably PFN to the MFN form Xen
 * expects, mirroring xen_p4d_val() above — then construct the entry
 * with the native helper. Registered as the .make_p4d pv_mmu_ops
 * callback via the callee-save thunk below.
 */
__visible p4d_t xen_make_p4d(p4dval_t p4d)
{
	p4d = pte_pfn_to_mfn(p4d);

	return native_make_p4d(p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */
#endif	/* CONFIG_X86_64 */

static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
@@ -2411,6 +2427,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,

#if CONFIG_PGTABLE_LEVELS >= 5
	.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
	.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
#endif
#endif	/* CONFIG_X86_64 */

	.activate_mm = xen_activate_mm,