Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5666f70 authored by Michael Ellerman, committed by Paul Mackerras
Browse files

[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET



This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't
looked at any of the PPC32 code, if we ever want to support Kdump on
PPC we'll have to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant, it'll always be 0xC * 1
gazillion for 64-bit.

To get a physical address from a virtual one you subtract PAGE_OFFSET,
_not_ KERNELBASE.

KERNELBASE is the virtual address of the start of the kernel, it's
often the same as PAGE_OFFSET, but _might not be_.

If you want to know something's offset from the start of the kernel
you should subtract KERNELBASE.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 51fae6de
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ int force_printk_to_btext = 0;
 *
 * The display is mapped to virtual address 0xD0000000, rather
 * than 1:1, because some some CHRP machines put the frame buffer
 * in the region starting at 0xC0000000 (KERNELBASE).
 * in the region starting at 0xC0000000 (PAGE_OFFSET).
 * This mapping is temporary and will disappear as soon as the
 * setup done by MMU_Init() is applied.
 *
@@ -71,7 +71,7 @@ int force_printk_to_btext = 0;
 */
void __init btext_prepare_BAT(void)
{
	unsigned long vaddr = KERNELBASE + 0x10000000;
	unsigned long vaddr = PAGE_OFFSET + 0x10000000;
	unsigned long addr;
	unsigned long lowbits;

+2 −2
Original line number Diff line number Diff line
@@ -690,7 +690,7 @@ _GLOBAL(enter_rtas)

	/* Setup our real return addr */	
	SET_REG_TO_LABEL(r4,.rtas_return_loc)
	SET_REG_TO_CONST(r9,KERNELBASE)
	SET_REG_TO_CONST(r9,PAGE_OFFSET)
	sub	r4,r4,r9
       	mtlr	r4

@@ -718,7 +718,7 @@ _GLOBAL(enter_rtas)
_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3	        /* Get PACA */
	SET_REG_TO_CONST(r5, KERNELBASE)
	SET_REG_TO_CONST(r5, PAGE_OFFSET)
        sub     r4,r4,r5                /* RELOC the PACA base pointer */

	mfmsr   r6
+3 −3
Original line number Diff line number Diff line
@@ -16,8 +16,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
	.xSegmentTableOffs = STAB0_PAGE,

	.xEsids = {
		{ .xKernelEsid = GET_ESID(KERNELBASE),
		  .xKernelVsid = KERNEL_VSID(KERNELBASE), },
		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
	},
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
	.xRanges = {
		{ .xPages = HvPagesToMap,
		  .xOffset = 0,
		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
		},
	},
};
+2 −3
Original line number Diff line number Diff line
@@ -153,9 +153,8 @@ void kexec_copy_flush(struct kimage *image)
	 * including ones that were in place on the original copy
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range(ranges[i].mem + KERNELBASE,
				ranges[i].mem + KERNELBASE +
				ranges[i].memsz);
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP
+3 −3
Original line number Diff line number Diff line
@@ -456,7 +456,7 @@ void __init htab_initialize(void)

	/* create bolted the linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = lmb.memory.region[i].base + KERNELBASE;
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start += KERNELBASE;
		tce_alloc_end += KERNELBASE;
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;
Loading