Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d75a97e authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm: mm: consider only lowmem regions while remap"

parents 33a4e3e3 89e53bc5
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -63,6 +63,17 @@ config DEBUG_USER
	      8 - SIGSEGV faults
	     16 - SIGBUS faults

config FORCE_PAGES
	bool "Force lowmem to be mapped with 4K pages"
	help
	  There are some advanced debug features that can only be done when
	  memory is mapped with pages instead of sections. Enable this option
	  to always map lowmem with pages. This may have a performance
	  cost due to increased TLB pressure.

	  If unsure say N.


# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
	bool "Kernel low-level debugging functions (read help!)"
+114 −0
Original line number Diff line number Diff line
@@ -1586,6 +1586,119 @@ static void __init early_paging_init(const struct machine_desc *mdesc)

#endif

#ifdef CONFIG_FORCE_PAGES
/*
 * split_pmd() - remap one section-mapped PMD entry as a table of 4K PTEs.
 * @pmd:  the PMD entry to rewrite
 * @addr: virtual start of the region covered by @pmd
 * @end:  virtual end (exclusive); the caller passes exactly one section
 * @pfn:  physical page frame number backing @addr
 * @type: mem_type providing prot_pte (PTE bits) and prot_l1 (PMD bits)
 *
 * Only the single PMD entry passed in is rewritten; on classic (non-LPAE)
 * ARM, where a Linux pmd covers two hardware sections, the sibling entry
 * is left for a separate call.
 */
static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, unsigned long pfn,
				const struct mem_type *type)
{
	pte_t *pte, *start_pte;
	pmd_t *base_pmd;

	/*
	 * Re-derive the PMD via init_mm rather than trusting @pmd, so we
	 * can test whether a PTE table has already been installed here.
	 */
	base_pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

	if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
		/* Still a section (or empty): allocate a fresh PTE table. */
		start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
#ifndef CONFIG_ARM_LPAE
		/*
		 * Following is needed when new pte is allocated for pmd[1]
		 * cases, which may happen when base (start) address falls
		 * under pmd[1].
		 */
		if (addr & SECTION_SIZE)
			start_pte += pte_index(addr);
#endif
	} else {
		/* A PTE table already exists for this pmd: reuse it. */
		start_pte = pte_offset_kernel(base_pmd, addr);
	}

	pte = start_pte;

	/* Populate one PTE per 4K page across [addr, end). */
	do {
		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	/*
	 * Switch the PMD from a section mapping to the hardware portion of
	 * the new PTE table. The barrier orders the PTE writes above before
	 * the PMD update becomes visible to the walker.
	 */
	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
	mb(); /* let pmd be programmed */
	flush_pmd_entry(pmd);
	flush_tlb_all();
}

/*
 * It's significantly easier to remap lowmem as pages after all memory has
 * been mapped: at this point everything is sections, so all we have to do
 * is split each section-mapped pmd with split_pmd().
 */
static void __init remap_pages(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t phys_start = reg->base;
		phys_addr_t phys_end = reg->base + reg->size;
		unsigned long addr = (unsigned long)__va(phys_start);
		unsigned long end = (unsigned long)__va(phys_end);
		pmd_t *pmd = NULL;
		unsigned long next;
		unsigned long pfn = __phys_to_pfn(phys_start);
		bool fixup = false;
		unsigned long saved_start = addr;

		/*
		 * Only lowmem is section-mapped. Clamp a region straddling
		 * the limit, and stop entirely once past it (assumes
		 * memblock regions are sorted by base — TODO confirm).
		 */
		if (phys_start > arm_lowmem_limit)
			break;
		if (phys_end > arm_lowmem_limit)
			end = (unsigned long)__va(arm_lowmem_limit);
		if (phys_start >= phys_end)
			break;

		pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

#ifndef	CONFIG_ARM_LPAE
		/*
		 * Non-LPAE: a Linux pmd spans two hardware sections. If the
		 * region starts or ends on an odd section boundary, reserve
		 * the unused half so the split below stays pmd-aligned.
		 */
		if (addr & SECTION_SIZE) {
			fixup = true;
			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
			pmd++;
		}

		if (end & SECTION_SIZE)
			pmd_empty_section_gap(end);
#endif

		/* Split every still-section-mapped pmd in [addr, end). */
		do {
			next = addr + SECTION_SIZE;

			if (pmd_none(*pmd) || pmd_bad(*pmd))
				split_pmd(pmd, addr, next, pfn,
						&mem_types[MT_MEMORY_RWX]);
			pmd++;
			pfn += SECTION_SIZE >> PAGE_SHIFT;

		} while (addr = next, addr < end);

		if (fixup) {
			/*
			 * Put a faulting page table here to avoid detecting no
			 * pmd when accessing an odd section boundary. This
			 * needs to be faulting to help catch errors and avoid
			 * speculation.
			 *
			 * NOTE(review): `pmd[1] & ~1` clears the type bit to
			 * make the entry fault; this presumes pmd_t is a plain
			 * integer (non-LPAE, no STRICT_MM_TYPECHECKS) —
			 * confirm against the target config.
			 */
			pmd = pmd_off_k(saved_start);
			pmd[0] = pmd[1] & ~1;
		}
	}
}
#else
static void __init remap_pages(void)
{
	/* No-op: without CONFIG_FORCE_PAGES lowmem keeps its section mappings. */
}
#endif

static void __init early_fixmap_shutdown(void)
{
	int i;
@@ -1628,6 +1741,7 @@ void __init paging_init(const struct machine_desc *mdesc)
	memblock_set_current_limit(arm_lowmem_limit);
	dma_contiguous_remap();
	early_fixmap_shutdown();
	remap_pages();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();
+2 −1
Original line number Diff line number Diff line
@@ -56,7 +56,8 @@ static int change_memory_common(unsigned long addr, int numpages,
	if (!size)
		return 0;

	if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
	if (!IS_ENABLED(CONFIG_FORCE_PAGES) &&
	    !in_range(start, size, MODULES_VADDR, MODULES_END) &&
	    !in_range(start, size, VMALLOC_START, VMALLOC_END))
		return -EINVAL;