Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5200ec9 authored by Reza Arbab, committed by Michael Ellerman
Browse files

powerpc/mm: refactor radix physical page mapping



Move the page mapping code in radix_init_pgtable() into a separate
function that will also be used for memory hotplug.

The current goto loop progressively decreases its mapping size as it
covers the tail of a range whose end is unaligned. Change this to a for
loop which can do the same for both ends of the range.

Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 023b13a5
Loading
Loading
Loading
Loading
+50 −38
Original line number Diff line number Diff line
@@ -108,54 +108,66 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
	return 0;
}

static void __init radix_init_pgtable(void)
/*
 * Log one contiguous chunk of the linear mapping that was created with a
 * single page size.  Empty ranges (end <= start) are silently skipped so
 * callers can invoke this unconditionally at page-size transitions.
 *
 * @start: first mapped address of the chunk
 * @end:   first address past the chunk
 * @size:  page size used for every mapping in [start, end)
 */
static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

/*
 * Map the physical range [start, end) into the kernel linear mapping,
 * using the largest page size that is both supported by the MMU and
 * compatible with the current address alignment and remaining gap.
 * Unlike the old goto loop in radix_init_pgtable() (which only shrank the
 * page size toward an unaligned tail), this handles unaligned addresses
 * at both ends of the range, and is reusable for memory hotplug.
 *
 * Returns 0 on success, or the error from radix__map_kernel_page().
 */
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long addr, mapping_size = 0;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;

		/* Prefer 1G, then 2M, falling back to the base page size. */
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		/*
		 * Page size changed: report the chunk just finished and
		 * start accounting a new one from here.
		 */
		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
					    PAGE_KERNEL_X, mapping_size);
		if (rc)
			return rc;
	}

	/* Report the final chunk. */
	print_mapping(start, addr, mapping_size);
	return 0;
}

static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
		 * map the rest using lower page size
	 * Create the linear mapping, using standard page size for now
	 */
		if (end < reg->base + reg->size) {
			start_addr = end;
			loop_count++;
			goto redo;
		}
	}
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate Partition table and process table for the
	 * host.