Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c011fbf authored by Mark Salter, committed by Joonwoo Park
Browse files

arm64: Add function to create identity mappings



At boot time, before switching to a virtual UEFI memory map, firmware
expects UEFI memory and IO regions to be identity mapped whenever the
kernel makes runtime services calls. The existing early boot code
creates an identity map of kernel text/data, but this is not sufficient
for UEFI. This patch adds a create_id_mapping() function which reuses
the core code of the existing create_mapping().

Signed-off-by: Mark Salter <msalter@redhat.com>
[ Fixed error message formatting (%pa). ]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Leif Lindholm <leif.lindholm@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


Git-commit: d7ecbddf4caefbac1b99478dd2b679f83dfc2545
[joonwoop@codeaurora.org: fixed conflict with '0e3d68: arm64: Support
 early fixup for CMA']
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 87d8c48a
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -35,5 +35,7 @@ extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void mem_text_write_kernel_word(u32 *addr, u32 word);
extern void init_mem_pgprot(void);
/* create an identity mapping for memory (or io if map_io is true) */
extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);

#endif
+44 −17
Original line number Diff line number Diff line
@@ -262,7 +262,8 @@ static void __init *early_alloc(unsigned long sz)
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

@@ -274,7 +275,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -301,10 +302,21 @@ pmdval_t get_pmd_prot_sect_kernel(unsigned long addr)

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  bool pages)
				  int map_io, bool pages)
{
	pmd_t *pmd;
	unsigned long next;
	pmdval_t prot_sect;
	pgprot_t prot_pte;

	if (map_io) {
		prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
			    PMD_ATTRINDX(MT_DEVICE_nGnRE);
		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
	} else {
		prot_sect = prot_sect_kernel;
		prot_pte = PAGE_KERNEL_EXEC;
	}

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
@@ -329,7 +341,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot_pte);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
@@ -337,14 +350,14 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys,
				  bool force_pages)
				  int map_io, bool force_pages)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, force_pages);
		alloc_init_pmd(pud, addr, next, phys, map_io, force_pages);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
@@ -353,30 +366,44 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'md'.
 */
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, bool force_pages)
static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
				    unsigned long virt, phys_addr_t size,
				    int map_io, bool force_pages)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd;

	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, force_pages);
		alloc_init_pud(pgd, addr, next, phys, map_io, force_pages);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, bool force_pages)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0, force_pages);
}

void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
{
	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
		return;
	}
	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
			 addr, addr, size, map_io, false);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);