Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ce6e6c84 authored by Catalin Marinas's avatar Catalin Marinas Committed by Sami Tolvanen
Browse files

UPSTREAM: arm64: Use last level TLBI for user pte changes



The flush_tlb_page() function is used on user address ranges when PTEs
(or PMDs/PUDs for huge pages) were changed (attributes or clearing). For
such cases, it is more efficient to invalidate only the last level of
the TLB with the "tlbi vale1is" instruction.

In the TLB shoot-down case, the TLB caching of the intermediate page
table levels (pmd, pud, pgd) is handled by __flush_tlb_pgtable() via the
__(pte|pmd|pud)_free_tlb() functions and it is not deferred to
tlb_finish_mmu() (as of commit 285994a62c80 - "arm64: Invalidate the TLB
corresponding to intermediate page table levels"). The tlb_flush()
function only needs to invalidate the TLB for the last level of page
tables; the __flush_tlb_range() function gains a fourth argument for
last level TLBI.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

Bug: 31432001
Change-Id: I6731fc666f032c79086c48998d02c2603129ff8e
(cherry picked from commit 4150e50bf5f2171fbe7dfdbc7f2cdf44676b79a4)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
parent 8e6e4a7e
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -41,7 +41,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
		flush_tlb_mm(tlb->mm);
	} else {
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
		flush_tlb_range(&vma, tlb->start, tlb->end);
		/*
		 * The intermediate page table levels are already handled by
		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
		 * TLBI is sufficient here.
		 */
		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
	}
}

+16 −5
Original line number Diff line number Diff line
@@ -87,7 +87,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
		((unsigned long)ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi	vae1is, %0" : : "r" (addr));
	asm("tlbi	vale1is, %0" : : "r" (addr));
	dsb(ish);
}

@@ -97,8 +97,9 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
	unsigned long addr;
@@ -112,11 +113,21 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			asm("tlbi vale1is, %0" : : "r"(addr));
		else
			asm("tlbi vae1is, %0" : : "r"(addr));
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;