
Commit 7e82ea94 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm: Make kernel_physical_mapping_init() support 5-level paging



Populate additional page table level if CONFIG_X86_5LEVEL is enabled.
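
For background: with CONFIG_X86_5LEVEL, the x86-64 page table walk gains a p4d level between the pgd and the pud, extending virtual addresses from 48 to 57 bits, with 9 index bits per level. Below is a minimal userspace sketch (not kernel code) of how a virtual address splits across the five levels; the shift constants mirror x86's PGDIR_SHIFT/P4D_SHIFT/PUD_SHIFT/PMD_SHIFT with 5-level paging enabled, and idx() is a hypothetical stand-in for the kernel's p4d_index()-style helpers:

/* Hypothetical illustration, not kernel code: split a virtual address
 * into the five x86-64 page table indexes plus the page offset. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12	/* 4 KiB pages */
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define P4D_SHIFT	39
#define PGDIR_SHIFT	48	/* 48 with 5-level paging; 39 with 4-level */
#define PTRS_PER_LEVEL	512	/* 9 index bits per level */

static unsigned int idx(uint64_t va, int shift)
{
	return (va >> shift) & (PTRS_PER_LEVEL - 1);
}

int main(void)
{
	uint64_t va = 0x00ffabcdef123456ULL;	/* arbitrary example address */

	printf("pgd=%u p4d=%u pud=%u pmd=%u pte=%u offset=%llu\n",
	       idx(va, PGDIR_SHIFT), idx(va, P4D_SHIFT),
	       idx(va, PUD_SHIFT), idx(va, PMD_SHIFT), idx(va, PAGE_SHIFT),
	       (unsigned long long)(va & ((1ULL << PAGE_SHIFT) - 1)));
	return 0;
}

In the patch below, phys_p4d_init() walks the P4D_SHIFT slice of exactly this split, covering one P4D_SIZE (512 GiB) region per loop iteration.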

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-12-kirill.shutemov@linux.intel.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 141efad7
+60 −9
@@ -624,6 +624,57 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	return paddr_last;
 }
 
+static unsigned long __meminit
+phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
+	      unsigned long page_size_mask)
+{
+	unsigned long paddr_next, paddr_last = paddr_end;
+	unsigned long vaddr = (unsigned long)__va(paddr);
+	int i = p4d_index(vaddr);
+
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+
+	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+		p4d_t *p4d;
+		pud_t *pud;
+
+		vaddr = (unsigned long)__va(paddr);
+		p4d = p4d_page + p4d_index(vaddr);
+		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+		if (paddr >= paddr_end) {
+			if (!after_bootmem &&
+			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+					     E820_TYPE_RAM) &&
+			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+					     E820_TYPE_RESERVED_KERN))
+				set_p4d(p4d, __p4d(0));
+			continue;
+		}
+
+		if (!p4d_none(*p4d)) {
+			pud = pud_offset(p4d, 0);
+			paddr_last = phys_pud_init(pud, paddr,
+					paddr_end,
+					page_size_mask);
+			__flush_tlb_all();
+			continue;
+		}
+
+		pud = alloc_low_page();
+		paddr_last = phys_pud_init(pud, paddr, paddr_end,
+					   page_size_mask);
+
+		spin_lock(&init_mm.page_table_lock);
+		p4d_populate(&init_mm, p4d, pud);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+	__flush_tlb_all();
+
+	return paddr_last;
+}
+
 /*
  * Create page table mapping for the physical memory for specific physical
  * addresses. The virtual and physical addresses have to be aligned on PMD level
@@ -645,26 +696,26 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		pgd_t *pgd = pgd_offset_k(vaddr);
 		p4d_t *p4d;
-		pud_t *pud;
 
 		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
-		BUILD_BUG_ON(pgd_none(*pgd));
-		p4d = p4d_offset(pgd, vaddr);
-		if (p4d_val(*p4d)) {
-			pud = (pud_t *)p4d_page_vaddr(*p4d);
-			paddr_last = phys_pud_init(pud, __pa(vaddr),
+		if (pgd_val(*pgd)) {
+			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 						   __pa(vaddr_end),
 						   page_size_mask);
 			continue;
 		}
 
-		pud = alloc_low_page();
-		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+		p4d = alloc_low_page();
+		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
 					   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
-		p4d_populate(&init_mm, p4d, pud);
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			pgd_populate(&init_mm, pgd, p4d);
+		else
+			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
 		spin_unlock(&init_mm.page_table_lock);
 		pgd_changed = true;
 	}
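
A note on the design: when CONFIG_X86_5LEVEL is disabled, phys_p4d_init() immediately degenerates to a phys_pud_init() call on the same page, and the populate step installs the pud page at p4d_offset(pgd, vaddr), which on 4-level builds resolves to the pgd entry itself because the p4d level is folded. The IS_ENABLED() checks are compile-time constants, so the untaken branch is discarded while still being parsed and type-checked. A standalone sketch of that idiom follows; it uses a simplified IS_ENABLED() (the real kernel macro detects defined/undefined CONFIG_* symbols via preprocessor indirection), so treat it as an assumption-laden illustration rather than kernel code:

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(); here the config
 * symbol is just defined to 0 or 1 directly. */
#define CONFIG_X86_5LEVEL 1
#define IS_ENABLED(option) (option)

int main(void)
{
	/* Both branches are compiled and type-checked, but the condition
	 * is a constant, so the optimizer drops the dead one. This is how
	 * the patch keeps a single code path for 4- and 5-level builds. */
	if (IS_ENABLED(CONFIG_X86_5LEVEL))
		puts("5-level: pgd_populate() wires the new p4d page into the pgd");
	else
		puts("4-level: p4d folds into pgd; p4d_populate() wires the pud page");
	return 0;
}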