Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e0c5ba8c authored by Mark Rutland, committed by Greg Kroah-Hartman
Browse files

UPSTREAM: arm64: tlbflush.h: add __tlbi() macro



As with dsb() and isb(), add a __tlbi() helper so that we can avoid
distracting asm boilerplate every time we want a TLBI. As some TLBI
operations take an argument while others do not, some pre-processor is
used to handle these two cases with different assembly blocks.

The existing tlbflush.h code is moved over to use the helper.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
[ rename helper to __tlbi, update comment and commit log ]
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
(cherry picked from commit db68f3e7594aca77632d56c449bd36c6c931d59a)

Change-Id: I9b94aff5efd20e3485dfa3a2780e1f8130e60d52
Signed-off-by: Greg Hackmann <ghackmann@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parent a56758fe
Loading
Loading
Loading
Loading
+26 −8
Original line number Diff line number Diff line
@@ -24,6 +24,24 @@
#include <linux/sched.h>
#include <asm/cputype.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
/* TLBI operation with no register operand, e.g. __tlbi(vmalle1is). */
#define __TLBI_0(op, arg)		asm ("tlbi " #op)
/* TLBI operation with one register operand, e.g. __tlbi(vale1is, addr). */
#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
/* Dispatch on n (1 when an argument was supplied, 0 otherwise). */
#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

/*
 * ##__VA_ARGS__ (GNU extension) deletes the preceding comma when no
 * argument is given, so the trailing "1, 0" shifts to make n select
 * __TLBI_1 (with arg) or __TLBI_0 (without).
 */
#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

/*
 *	TLB Management
 *	==============
@@ -66,7 +84,7 @@
/*
 * local_flush_tlb_all - invalidate all stage-1 TLB entries on this CPU only.
 *
 * Uses the non-broadcast "vmalle1" operation (no IS suffix), so only the
 * local CPU is affected.  dsb(nshst) orders prior page-table writes before
 * the invalidate; dsb(nsh) + isb() ensure completion before execution
 * continues.
 *
 * Fix: the stripped diff markers had left both the removed
 * asm("tlbi vmalle1") line and its __tlbi(vmalle1) replacement in the
 * rendered text; only the __tlbi() form belongs in the merged source.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
@@ -74,7 +92,7 @@ static inline void local_flush_tlb_all(void)
/*
 * flush_tlb_all - invalidate all stage-1 TLB entries on every CPU in the
 * inner-shareable domain (broadcast "vmalle1is" variant).
 *
 * Fix: the stripped diff markers had left both the removed
 * asm("tlbi vmalle1is") line and its __tlbi(vmalle1is) replacement in the
 * rendered text; only the __tlbi() form belongs in the merged source.
 */
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
@@ -84,7 +102,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	asm("tlbi	aside1is, %0" : : "r" (asid));
	__tlbi(aside1is, asid);
	dsb(ish);
}

@@ -94,7 +112,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi	vale1is, %0" : : "r" (addr));
	__tlbi(vale1is, addr);
	dsb(ish);
}

@@ -122,9 +140,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
	/*
	 * NOTE(review): this is a rendered diff hunk with the +/- markers
	 * stripped — each asm("tlbi ...") line below is the removed
	 * pre-change line and the __tlbi(...) line following it is its
	 * added replacement; only the __tlbi() lines exist in the merged
	 * file.  The start of the function is outside this hunk, so the
	 * encoding of start/end/addr cannot be confirmed from here.
	 */
	dsb(ishst);
	/*
	 * One invalidate per page; last_level selects VALE1IS (leaf-level
	 * entry only) over VAE1IS (entries from all levels).
	 */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			asm("tlbi vale1is, %0" : : "r"(addr));	/* removed */
			__tlbi(vale1is, addr);			/* added */
		else
			asm("tlbi vae1is, %0" : : "r"(addr));	/* removed */
			__tlbi(vae1is, addr);			/* added */
	}
	dsb(ish);
}
@@ -149,7 +167,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end

	/*
	 * NOTE(review): rendered diff hunk with the +/- markers stripped —
	 * the asm("tlbi vaae1is, ...") line is the removed pre-change line
	 * and the __tlbi(vaae1is, addr) line is its added replacement;
	 * only the __tlbi() line exists in the merged file.  The top of
	 * the function (locals, any range check) is outside this hunk.
	 */
	dsb(ishst);
	/*
	 * VAAE1IS: invalidate by VA for all ASIDs — kernel mappings are
	 * not ASID-tagged.  The trailing isb() synchronizes the context
	 * before execution continues.
	 */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vaae1is, %0" : : "r"(addr));	/* removed */
		__tlbi(vaae1is, addr);			/* added */
	dsb(ish);
	isb();
}
@@ -163,7 +181,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	asm("tlbi	vae1is, %0" : : "r" (addr));
	__tlbi(vae1is, addr);
	dsb(ish);
}