Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 14c89e7f authored by David Gibson, committed by Paul Mackerras
Browse files

[PATCH] powerpc: Replace VMALLOCBASE with VMALLOC_START



On ppc64, we independently define VMALLOCBASE and VMALLOC_START to be
the same thing: the start of the vmalloc() area at 0xd000000000000000.
VMALLOC_START is used much more widely, including in generic code, so
this patch gets rid of the extraneous VMALLOCBASE.

This does require moving the definitions of region IDs from page_64.h
to pgtable.h, but they don't clearly belong in the former rather than
the latter, anyway.  While we're moving them, clean up the definitions
of the REGION_IDs:
	- Abolish REGION_SIZE, it was only used once, to define
REGION_MASK anyway
	- Define the specific region ids in terms of the REGION_ID()
macro.
	- Define KERNEL_REGION_ID in terms of PAGE_OFFSET rather than
KERNELBASE.  It amounts to the same thing, but conceptually this is
about the region of the linear mapping (which starts at PAGE_OFFSET)
rather than of the kernel text itself (which is at KERNELBASE).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 56c8eaee
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -18,8 +18,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
	.xEsids = {
		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
		{ .xKernelEsid = GET_ESID(VMALLOC_START),
		  .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
	},

	.xRanges = {
+3 −3
Original line number Diff line number Diff line
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
		        "r"(mk_esid_data(VMALLOCBASE, 1)),
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, 1)),
		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
		        "r"(ksp_esid_data)
		     : "memory");
@@ -216,7 +216,7 @@ void slb_initialize(void)
	create_slbe(PAGE_OFFSET, lflags, 0);

	/* VMALLOC space has 4K pages always for now */
	create_slbe(VMALLOCBASE, vflags, 1);
	create_slbe(VMALLOC_START, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
+0 −10
Original line number Diff line number Diff line
@@ -25,16 +25,6 @@
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)

#define REGION_SIZE   4UL
#define REGION_SHIFT  60UL
#define REGION_MASK   (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)

#define VMALLOCBASE		ASM_CONST(0xD000000000000000)
#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
#define USER_REGION_ID		(0UL)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

/* Segment size */
#define SID_SHIFT		28
#define SID_MASK		0xfffffffffUL
+11 −0
Original line number Diff line number Diff line
@@ -57,6 +57,17 @@ struct mm_struct;
#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID		(0UL)

/*
 * Common bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible. Additional