Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 614dd058 authored by Russell King
Browse files

ARM: pgtable: collect up identity mapping functions



We have two places where we create identity mappings - one when we bring
secondary CPUs online, and one where we set up some mappings for soft-
reboot.  Combine these two into a single implementation.  Also collect
the identity mapping deletion function.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 26bbf0b5
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -474,6 +474,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */
+0 −34
Original line number Diff line number Diff line
@@ -68,40 +68,6 @@ enum ipi_msg_type {
	IPI_CPU_STOP,
};

/*
 * Install a writable 1:1 (identity) section mapping covering
 * [start, end) into the given page-global directory.  Each PGD slot
 * holds two consecutive section entries, so both pmd[0] and pmd[1]
 * are written per iteration, then the pair is flushed to RAM with
 * flush_pmd_entry() so the MMU table walker sees it.
 */
static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr, prot;
	pmd_t *pmd;

	/* Writable section; pre-ARMv6 non-Xscale cores also need PMD_BIT4. */
	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	/* Round start down to a PGD boundary; step one section at a time. */
	for (addr = start & PGDIR_MASK; addr < end;) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		flush_pmd_entry(pmd);
	}
}

/*
 * Undo identity_mapping_add(): clear both section entries of every
 * PGD slot covering [start, end) and clean each cleared pair out of
 * the cache with clean_pmd_entry().
 */
static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr;
	pmd_t *pmd;

	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(0);	/* invalid descriptor */
		pmd[1] = __pmd(0);
		clean_pmd_entry(pmd);
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+2 −2
Original line number Diff line number Diff line
@@ -5,8 +5,8 @@
obj-y				:= dma-mapping.o extable.o fault.o init.o \
				   iomap.o

obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
				   pgd.o mmu.o vmregion.o
obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
				   mmap.o pgd.o mmu.o vmregion.o

ifneq ($(CONFIG_MMU),y)
obj-y				+= nommu.o

arch/arm/mm/idmap.c

0 → 100644
+51 −0
Original line number Diff line number Diff line
#include <linux/kernel.h>

#include <asm/cputype.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Install a writable 1:1 (identity) section mapping covering
 * [addr, end) into the given page-global directory.  Each PGD slot
 * holds two consecutive section entries; both are written before the
 * pair is flushed to RAM with flush_pmd_entry() so the MMU table
 * walker sees it.  The addr parameter is consumed as the loop cursor.
 */
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot;

	/* Writable section; pre-ARMv6 non-Xscale cores also need PMD_BIT4. */
	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	/* Round down to a PGD boundary; step one section at a time. */
	for (addr &= PGDIR_MASK; addr < end;) {
		pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		flush_pmd_entry(pmd);
	}
}

#ifdef CONFIG_SMP
/*
 * Undo identity_mapping_add(): clear both section entries of every
 * PGD slot covering [addr, end) and clean each cleared pair out of
 * the cache.  Only needed by the SMP secondary-CPU bringup path,
 * hence the CONFIG_SMP guard.
 */
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	for (addr &= PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
		pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(0);	/* invalid descriptor */
		pmd[1] = __pmd(0);
		clean_pmd_entry(pmd);
	}
}
#endif

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 *
 * The mode argument is unused here.
 */
void setup_mm_for_reboot(char mode)
{
	/*
	 * We need to access to user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
	/* Drop stale TLB entries so the new identity mapping takes effect. */
	local_flush_tlb_all();
}
+0 −35
Original line number Diff line number Diff line
@@ -1045,38 +1045,3 @@ void __init paging_init(struct machine_desc *mdesc)
	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 *
 * The mode argument is unused here.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need to access to user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	/* Read/write section mapping; pre-ARMv6 non-Xscale cores need PMD_BIT4. */
	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	/*
	 * Walk every user-space PGD entry, installing an identity section
	 * mapping for the address range the entry covers.  Each entry holds
	 * two section descriptors: pmd[1] maps the second half of the
	 * PGD-sized range, hence the (1 << (PGDIR_SHIFT - 1)) offset.
	 */
	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	/* Drop stale TLB entries so the new identity mapping takes effect. */
	local_flush_tlb_all();
}