Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7edd88ad authored by Catalin Marinas
Browse files

arm64: Do not initialise the fixmap page tables in head.S



The early_ioremap_init() function already handles fixmap pte
initialisation, so upgrade this to cover all of pud/pmd/pte and remove
one page from swapper_pg_dir.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>
parent affeafbb
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -33,11 +33,11 @@

/*
 * The idmap and swapper page tables need some space reserved in the kernel
 * image. The idmap only requires a pgd and a next level table to (section) map
 * the kernel, while the swapper also maps the FDT and requires an additional
 * table to map an early UART. See __create_page_tables for more information.
 * image. Both require a pgd and a next level table to (section) map the
 * kernel. The swapper also maps the FDT (see __create_page_tables for
 * more information).
 */
#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define SWAPPER_DIR_SIZE	(2 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

#ifndef __ASSEMBLY__
+0 −7
Original line number Diff line number Diff line
@@ -582,13 +582,6 @@ __create_page_tables:
	sub	x6, x6, #1			// inclusive range
	create_block_map x0, x7, x3, x5, x6
1:
	/*
	 * Create the pgd entry for the fixed mappings.
	 */
	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
	add	x0, x26, #2 * PAGE_SIZE		// section table address
	create_pgd_entry x26, x0, x5, x6, x7

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
+18 −8
Original line number Diff line number Diff line
@@ -103,19 +103,25 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
}
EXPORT_SYMBOL(ioremap_cache);

#ifndef CONFIG_ARM64_64K_PAGES
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#ifndef CONFIG_ARM64_64K_PAGES
static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
static inline pud_t * __init early_ioremap_pud(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(addr);
	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	return pud_offset(pgd, addr);
}

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	pud_t *pud = early_ioremap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
@@ -132,13 +138,17 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)

void __init early_ioremap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
#ifndef CONFIG_ARM64_64K_PAGES
	/* need to populate pmd for 4k pagesize only */
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);
#endif

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared: