Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5f53d286 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman
Browse files

powerpc/mm/hash: Rename KERNEL_REGION_ID to LINEAR_MAP_REGION_ID



The region actually points to the linear map. Rename the #define to
clarify that.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a092a03f
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -89,7 +89,7 @@
 * Region IDs
 */
#define USER_REGION_ID		0
#define KERNEL_REGION_ID	1
#define LINEAR_MAP_REGION_ID	1
#define VMALLOC_REGION_ID	NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID		NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID	NON_LINEAR_REGION_ID(H_VMEMMAP_START)
@@ -120,7 +120,7 @@ static inline int get_region_id(unsigned long ea)
		return USER_REGION_ID;

	if (ea < H_KERN_VIRT_START)
		return KERNEL_REGION_ID;
		return LINEAR_MAP_REGION_ID;

	VM_BUG_ON(id != 0xc);
	BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
+1 −1
Original line number Diff line number Diff line
@@ -817,7 +817,7 @@ static inline unsigned long get_kernel_context(unsigned long ea)
	 * Depending on Kernel config, kernel region can have one context
	 * or more.
	 */
	if (region_id == KERNEL_REGION_ID) {
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
+2 −2
Original line number Diff line number Diff line
@@ -129,8 +129,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+2 −2
Original line number Diff line number Diff line
@@ -691,7 +691,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
	unsigned long flags;
	int ssize;

	if (id == KERNEL_REGION_ID) {
	if (id == LINEAR_MAP_REGION_ID) {

		/* We only support upto MAX_PHYSMEM_BITS */
		if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
@@ -790,7 +790,7 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
	 * first class kernel code. But for performance it's probably nicer
	 * if they go via fast_exception_return too.
	 */
	if (id >= KERNEL_REGION_ID) {
	if (id >= LINEAR_MAP_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
		/* Catch recursive kernel SLB faults. */
+1 −1
Original line number Diff line number Diff line
@@ -224,7 +224,7 @@ static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (get_region_id(ea) == KERNEL_REGION_ID)
	if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;