Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 667c2759 authored by Catalin Marinas
Browse files

Revert "arm64: Mark kernel page ranges contiguous"



This reverts commit 348a65cd.

Incorrect page table manipulation that does not respect the ARM ARM
recommended break-before-make sequence may lead to TLB conflicts. The
contiguous PTE patch makes the system even more susceptible to such
errors by changing the mapping from a single page to a contiguous range
of pages. An additional TLB invalidation would reduce the risk window,
however, the correct fix is to switch to a temporary swapper_pg_dir.
Once the correct workaround is done, the reverted commit will be
re-applied.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Jeremy Linton <jeremy.linton@arm.com>
parent 0ebea808
Loading
Loading
Loading
Loading
+8 −61
Original line number Diff line number Diff line
@@ -85,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
	do {
		/*
		 * Need to have the least restrictive permissions available
		 * permissions will be fixed up later. Default the new page
		 * range as contiguous ptes.
		 * permissions will be fixed up later
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

/*
 * Given a PTE with the CONT bit set, determine where the CONT range
 * starts, and clear the entire range of PTE CONT bits.
 */
static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
{
	int i;

	pte -= CONT_RANGE_OFFSET(addr);
	for (i = 0; i < CONT_PTES; i++) {
		set_pte(pte, pte_mknoncont(*pte));
		pte++;
	}
	flush_tlb_all();
}

/*
 * Given a range of PTEs set the pfn and provided page protection flags
 */
static void __populate_init_pte(pte_t *pte, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot)
{
	unsigned long pfn = __phys_to_pfn(phys);

	do {
		/* clear all the bits except the pfn, then apply the prot */
		set_pte(pte, pfn_pte(pfn, prot));
		pte++;
		pfn++;
		addr += PAGE_SIZE;
	} while (addr != end);
}

static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -146,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,

	pte = pte_offset_kernel(pmd, addr);
	do {
		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
		if (((addr | next | phys) & ~CONT_MASK) == 0) {
			/* a block of CONT_PTES  */
			__populate_init_pte(pte, addr, next, phys,
					    __pgprot(pgprot_val(prot) | PTE_CONT));
		} else {
			/*
			 * If the range being split is already inside of a
			 * contiguous range but this PTE isn't going to be
			 * contiguous, then we want to unmark the adjacent
			 * ranges, then update the portion of the range we
			 * are interrested in.
			 */
			 clear_cont_pte_range(pte, addr);
			 __populate_init_pte(pte, addr, next, phys, prot);
		}

		pte += (next - addr) >> PAGE_SHIFT;
		phys += next - addr;
		addr = next;
	} while (addr != end);
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -227,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, phys, prot, alloc);
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);