
Commit fa48e6f7 authored by Steve Capper, committed by Catalin Marinas

arm64: mm: Optimise tlb flush logic where we have >4K granule



The tlb maintenance functions: __cpu_flush_user_tlb_range and
__cpu_flush_kern_tlb_range do not take the page granule into
consideration when looping through the address range, and so repeatedly
flush tlb entries for the same page when operating with 64K pages.

This patch re-works the logic such that we instead advance the loop by
1 << (PAGE_SHIFT - 12), avoiding repeated flushes of the same page.
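To make the new stride concrete: the TLBI address operand is always the
VA shifted right by 12, independent of the page granule, so with 64K
pages (PAGE_SHIFT = 16) the loop now advances by 1 << 4 = 16 units per
page instead of 1. A minimal userspace sketch, with illustrative range
and granule values only (this is not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 16;			/* 64K granule */
	unsigned long start = 0x400000, end = 0x440000;	/* 256K range */
	unsigned long stride = 1UL << (page_shift - 12);
	unsigned long ops = 0;

	/* The old code stepped by 1 (i.e. 4K) and so hit each 64K
	 * page sixteen times; stepping by the granule visits each
	 * page exactly once. */
	for (unsigned long addr = start >> 12; addr < end >> 12; addr += stride)
		ops++;

	/* prints: 4 tlbi operations (was 64) */
	printf("%lu tlbi operations (was %lu)\n", ops, (end - start) >> 12);
	return 0;
}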

Also the routines have been converted from assembler to static inline
functions to aid with legibility and potential compiler optimisations.

The isb() has been removed from flush_tlb_kernel_range() as it is
only needed when changing the execute permission of a mapping. If one
needs to set an area of the kernel as executable/non-executable, an
isb() must be inserted after the call to flush_tlb_kernel_range().
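As a hedged sketch of that caller-side contract
(change_kernel_permissions() is a hypothetical helper named here only
for illustration; flush_tlb_kernel_range() and isb() are the real
interfaces):

#include <asm/tlbflush.h>	/* flush_tlb_kernel_range() */
#include <asm/barrier.h>	/* isb() */

/* hypothetical helper that rewrites the PTEs for [start, end) */
extern void change_kernel_permissions(unsigned long start, unsigned long end);

static void make_range_executable(unsigned long start, unsigned long end)
{
	change_kernel_permissions(start, end);	/* hypothetical PTE rewrite */
	flush_tlb_kernel_range(start, end);	/* drop stale TLB entries */
	isb();		/* required: execute permission has changed */
}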

Cc: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent e1dfda9c
arch/arm64/include/asm/tlbflush.h

+25 −5
@@ -98,11 +98,31 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
	dsb();
}

-/*
- * Convert calls to our calling convention.
- */
-#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+					unsigned long start, unsigned long end)
+{
+	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+	unsigned long addr;
+	start = asid | (start >> 12);
+	end = asid | (end >> 12);
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vae1is, %0" : : "r"(addr));
+	dsb(ish);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+	start >>= 12;
+	end >>= 12;
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vaae1is, %0" : : "r"(addr));
+	dsb(ish);
+}

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
arch/arm64/mm/Makefile

+1 −1
obj-y				:= dma-mapping.o extable.o fault.o init.o \
				   cache.o copypage.o flush.o \
				   ioremap.o mmap.o pgd.o mmu.o \
-				   context.o tlb.o proc.o
+				   context.o proc.o
obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o

arch/arm64/mm/tlb.S

deleted 100644 → 0
+0 −71
/*
 * Based on arch/arm/mm/tlb.S
 *
 * Copyright (C) 1997-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 *	__cpu_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 */
ENTRY(__cpu_flush_user_tlb_range)
	vma_vm_mm x3, x2			// get vma->vm_mm
	mmid	w3, x3				// get vm_mm->context.id
	dsb	sy
	lsr	x0, x0, #12			// align address
	lsr	x1, x1, #12
	bfi	x0, x3, #48, #16		// start VA and ASID
	bfi	x1, x3, #48, #16		// end VA and ASID
1:	tlbi	vae1is, x0			// TLB invalidate by address and ASID
	add	x0, x0, #1
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__cpu_flush_user_tlb_range)

/*
 *	__cpu_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(__cpu_flush_kern_tlb_range)
	dsb	sy
	lsr	x0, x0, #12			// align address
	lsr	x1, x1, #12
1:	tlbi	vaae1is, x0			// TLB invalidate by address
	add	x0, x0, #1
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	isb
	ret
ENDPROC(__cpu_flush_kern_tlb_range)