
Commit 00516964 authored by Ard Biesheuvel, committed by Rohit Vaswani

arm64/mm: add create_pgd_mapping() to create private page tables



For UEFI, we need to install the memory mappings used for Runtime Services
in a dedicated set of page tables. Add create_pgd_mapping(), which allows
us to allocate and install those page table entries early.

Change-Id: I49075f5068756412cb81503d480d91821cfa59c4
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Git-commit: 8ce837cee8f51fb0eacb32c85461ea2f0fafc9f8
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


[rvaswani@codeaurora.org: fixed merge conflicts]
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
parent 1ad6bdf3
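
An illustrative usage sketch (editorial addition, not part of this patch): the UEFI runtime setup code that motivates this change would populate a private set of page tables roughly as below. The efi_pgd/efi_mm objects and the map_runtime_region() helper are assumptions for the sketch; only create_pgd_mapping() itself comes from this commit.

/* Sketch only: a statically allocated private pgd owned by a minimal mm. */
static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;

static struct mm_struct efi_mm = {
	.pgd = efi_pgd,		/* a real mm_struct needs fuller initialisation */
};

static void __init map_runtime_region(phys_addr_t paddr, unsigned long vaddr,
				      phys_addr_t size, bool device)
{
	/* Device regions need nGnRE attributes; normal memory may be executable. */
	pgprot_t prot = device ? __pgprot(PROT_DEVICE_nGnRE) : PAGE_KERNEL_EXEC;

	create_pgd_mapping(&efi_mm, paddr, vaddr, size, prot);
}
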
arch/arm64/include/asm/mmu.h +3 −0
@@ -34,5 +34,8 @@ extern void init_mem_pgprot(void);
 /* create an identity mapping for memory (or io if map_io is true) */
 extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
 extern void mem_text_write_kernel_word(u32 *addr, u32 word);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot);
 
 #endif
arch/arm64/include/asm/pgtable.h +5 −0
@@ -264,6 +264,11 @@ static inline pmd_t pte_pmd(pte_t pte)
 	return __pmd(pte_val(pte));
 }
 
+static inline pgprot_t mk_sect_prot(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+}
+
 /*
  * THP definitions.
  */
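
A note on the mk_sect_prot() helper added above (editorial): PTE_TABLE_BIT is bit 1 of an ARMv8 translation-table descriptor. At the PMD and PUD levels a set bit 1 marks a table entry, while a clear bit 1 marks a block (section) mapping; mk_sect_prot() strips that bit so attributes composed for page mappings, such as PAGE_KERNEL, can be installed directly as block descriptors. A minimal sketch of the call pattern the mmu.c changes below adopt (the put_block() wrapper is hypothetical):

/* Sketch only: map one 2MB block at *pmd using page-style attributes. */
static void __init put_block(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	/* mk_sect_prot() clears PTE_TABLE_BIT: the entry becomes a block,
	 * not a pointer to a next-level table. */
	set_pmd(pmd, __pmd(phys | pgprot_val(mk_sect_prot(prot))));
}
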
arch/arm64/mm/mmu.c +23 −21
@@ -265,20 +265,10 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, int map_io, bool pages)
+				  phys_addr_t phys, pgprot_t prot, bool pages)
 {
 	pmd_t *pmd;
 	unsigned long next;
-	pmdval_t prot_sect;
-	pgprot_t prot_pte;
-
-	if (map_io) {
-		prot_sect = PROT_SECT_DEVICE_nGnRE;
-		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
-	} else {
-		prot_sect = PROT_SECT_NORMAL_EXEC;
-		prot_pte = PAGE_KERNEL;
-	}
 
 	/*
 	 * Check for initial section mappings in the pgd/pud and remove them.
@@ -294,7 +284,8 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 		/* try section mapping first */
 		if (!pages && ((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect));
+			set_pmd(pmd, __pmd(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -303,7 +294,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				flush_tlb_all();
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot_pte);
+				       prot);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
@@ -311,7 +302,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 
 static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, int map_io, bool force_pages)
+				  phys_addr_t phys, pgprot_t prot, bool force_pages)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -329,11 +320,12 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (!map_io && (PAGE_SHIFT == 12) &&
+		if ((PAGE_SHIFT == 12) &&
 		    (((addr | next | phys) & ~PUD_MASK) == 0) &&
 				!dma_overlap(phys, phys + next - addr)) {
 			pud_t old_pud = *pud;
-			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+			set_pud(pud, __pud(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 
 			/*
 			 * If we have an old value for a pud, it will
@@ -348,7 +340,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				flush_tlb_all();
 			}
 		} else {
-			alloc_init_pmd(mm, pud, addr, next, phys, map_io, force_pages);
+			alloc_init_pmd(mm, pud, addr, next, phys, prot, force_pages);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -361,7 +353,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 
 static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 				    phys_addr_t phys, unsigned long virt,
-				    phys_addr_t size, int map_io, bool force_pages)
+				    phys_addr_t size, pgprot_t prot, bool force_pages)
 {
 	unsigned long addr, length, end, next;
 
@@ -371,7 +363,7 @@ static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, map_io, force_pages);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot, force_pages);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
@@ -385,7 +377,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-			 size, 0, force_pages);
+			 size, PAGE_KERNEL_EXEC, force_pages);
 }
 
 static inline pmd_t *pmd_off_k(unsigned long virt)
@@ -454,7 +446,17 @@ void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
 		return;
 	}
 	__create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)],
-			 addr, addr, size, map_io, false);
+			 addr, addr, size,
+			 map_io ? __pgprot(PROT_DEVICE_nGnRE)
+				: PAGE_KERNEL_EXEC,
+			 false);
 }
 
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot)
+{
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, false);
+}
+
 static void __init map_mem(void)