Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 492e6751 authored by David Howells
Browse files

MN10300: Rename __flush_tlb*() to local_flush_tlb*()



Rename __flush_tlb*() to local_flush_tlb*() as it's more appropriate, and ready
to differentiate local from global TLB flushes when SMP is introduced.

Whilst we're at it, get rid of __flush_tlb_global() and make
local_flush_tlb_page() take an mm_struct pointer rather than VMA pointer.

Signed-off-by: David Howells <dhowells@redhat.com>
parent 8f19e3da
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
		BUG();
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);
	local_flush_tlb_one(vaddr);

	return vaddr;
}
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
		 * this pte without first remap it
		 */
		pte_clear(kmap_pte - idx);
		__flush_tlb_one(vaddr);
		local_flush_tlb_one(vaddr);
	}
#endif
	pagefault_enable();
+1 −1
Original line number Diff line number Diff line
@@ -58,7 +58,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
		/* we exhausted the TLB PIDs of this version on this CPU, so we
		 * flush this CPU's TLB in its entirety and start new cycle */
		flush_tlb_all();
		local_flush_tlb_all();

		/* fix the TLB version if needed (we avoid version #0 so as to
		 * distingush MMU_NO_CONTEXT) */
+35 −21
Original line number Diff line number Diff line
@@ -13,21 +13,37 @@

#include <asm/processor.h>

#define __flush_tlb()						\
do {								\
	int w;							\
	__asm__ __volatile__					\
		("	mov %1,%0		\n"		\
		 "	or %2,%0		\n"		\
		 "	mov %0,%1		\n"		\
		 : "=d"(w)					\
		 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)	\
		 : "cc", "memory"				\
		 );						\
} while (0)
/**
 * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
 *
 * Reads the MMUCTR register, ORs in the MMUCTR_IIV and MMUCTR_DIV bits and
 * writes the value back.  Going by the bit names, this appears to invalidate
 * the instruction and data TLBs in their entirety -- NOTE(review): confirm
 * against the MN10300 MMUCTR register documentation.
 *
 * The "memory" clobber keeps the compiler from reordering memory accesses
 * across the flush; "cc" is clobbered because OR sets the condition codes.
 */
static inline void local_flush_tlb(void)
{
	int w;
	asm volatile(
		"	mov	%1,%0		\n"
		"	or	%2,%0		\n"
		"	mov	%0,%1		\n"
		: "=d"(w)
		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
		: "cc", "memory");
}

#define __flush_tlb_all() __flush_tlb()
#define __flush_tlb_one(addr) __flush_tlb()
/**
 * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
 */
#define local_flush_tlb_all()		local_flush_tlb()

/**
 * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
 *
 * Note: @addr is ignored -- this expands to a full local_flush_tlb(), so
 * the entire TLB is invalidated rather than just the one entry.
 */
#define local_flush_tlb_one(addr)	local_flush_tlb()

/**
 * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
 * @mm: The MM to flush for
 * @addr: The address of the target page in RAM (not its page struct)
 */
extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);


/*
@@ -43,14 +59,14 @@ do { \
#define flush_tlb_all()				\
do {						\
	preempt_disable();			\
	__flush_tlb_all();			\
	local_flush_tlb_all();			\
	preempt_enable();			\
} while (0)

#define flush_tlb_mm(mm)			\
do {						\
	preempt_disable();			\
	__flush_tlb_all();			\
	local_flush_tlb_all();			\
	preempt_enable();			\
} while (0)

@@ -59,13 +75,13 @@ do { \
	unsigned long __s __attribute__((unused)) = (start);	\
	unsigned long __e __attribute__((unused)) = (end);	\
	preempt_disable();					\
	__flush_tlb_all();					\
	local_flush_tlb_all();					\
	preempt_enable();					\
} while (0)


#define __flush_tlb_global()			flush_tlb_all()
#define flush_tlb_page(vma, addr)	local_flush_tlb_page((vma)->vm_mm, addr)
#define flush_tlb()			flush_tlb_all()

#define flush_tlb_kernel_range(start, end)			\
do {								\
	unsigned long __s __attribute__((unused)) = (start);	\
@@ -73,8 +89,6 @@ do { \
	flush_tlb_all();					\
} while (0)

extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);

#define flush_tlb_pgtables(mm, start, end)	do {} while (0)

#endif /* _ASM_TLBFLUSH_H */
+1 −1
Original line number Diff line number Diff line
@@ -73,7 +73,7 @@ void __init paging_init(void)
	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(zones_size);

	__flush_tlb_all();
	local_flush_tlb_all();
}

/*
+2 −2
Original line number Diff line number Diff line
@@ -23,7 +23,7 @@ unsigned long mmu_context_cache[NR_CPUS] = {
/*
 * flush the specified TLB entry
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pteu, cnx, flags;

@@ -33,7 +33,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
	 * interference from vmalloc'd regions */
	local_irq_save(flags);

	cnx = mm_context(vma->vm_mm);
	cnx = mm_context(mm);

	if (cnx != MMU_NO_CONTEXT) {
		pteu = addr | (cnx & 0x000000ffUL);
Loading