Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b9c99b6 authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

x86: cleanup tlbflush.h variants



Bring the tlbflush.h variants into sync to prepare merging and
paravirt support.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 1075cf7a
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -55,7 +55,6 @@ union smp_flush_state {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	-1ULL
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
@@ -153,7 +152,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
@@ -166,11 +165,12 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
	add_pda(irq_tlb_count, 1);
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -223,7 +223,7 @@ void flush_tlb_current_task(void)

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

@@ -242,7 +242,7 @@ void flush_tlb_mm (struct mm_struct * mm)
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}
+3 −4
Original line number Diff line number Diff line
@@ -800,7 +800,6 @@ static void smp_reschedule_interrupt(void)
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context,
@@ -834,7 +833,7 @@ static void smp_invalidate_interrupt(void)

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
@@ -903,7 +902,7 @@ void flush_tlb_current_task(void)
	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}
@@ -923,7 +922,7 @@ void flush_tlb_mm(struct mm_struct *mm)
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}
+1 −1
Original line number Diff line number Diff line
@@ -57,7 +57,7 @@ static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
	pte = boot_vaddr_to_pte(virtual_source);
	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
		__flush_tlb_one((unsigned long) &vaddr[i*PAGE_SIZE]);
	}
}

+9 −0
Original line number Diff line number Diff line
@@ -163,6 +163,12 @@
#define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLSH)
#define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)

#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg		1
#else
# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
#endif

#ifdef CONFIG_X86_64

#undef  cpu_has_vme
@@ -183,6 +189,9 @@
#undef  cpu_has_centaur_mcr
#define cpu_has_centaur_mcr	0

#undef  cpu_has_pge
#define cpu_has_pge		1

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_CPUFEATURE_H */
+50 −70
Original line number Diff line number Diff line
#ifndef _I386_TLBFLUSH_H
#define _I386_TLBFLUSH_H
#ifndef _X86_TLBFLUSH_H
#define _X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
@@ -12,62 +15,41 @@
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

#define __native_flush_tlb()						\
	do {								\
		unsigned int tmpreg;					\
									\
		__asm__ __volatile__(					\
			"movl %%cr3, %0;              \n"		\
			"movl %0, %%cr3;  # flush TLB \n"		\
			: "=r" (tmpreg)					\
			:: "memory");					\
	} while (0)
/*
 * Flush all non-global TLB entries by rewriting CR3 with its
 * current value.
 */
static inline void __native_flush_tlb(void)
{
	unsigned long cr3 = read_cr3();

	write_cr3(cr3);
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
#define __native_flush_tlb_global()					\
	do {								\
		unsigned int tmpreg, cr4, cr4_orig;			\
									\
		__asm__ __volatile__(					\
			"movl %%cr4, %2;  # turn off PGE     \n"	\
			"movl %2, %1;                        \n"	\
			"andl %3, %1;                        \n"	\
			"movl %1, %%cr4;                     \n"	\
			"movl %%cr3, %0;                     \n"	\
			"movl %0, %%cr3;  # flush TLB        \n"	\
			"movl %2, %%cr4;  # turn PGE back on \n"	\
			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
			: "i" (~X86_CR4_PGE)				\
			: "memory");					\
	} while (0)

#define __native_flush_tlb_single(addr) 				\
	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")

# define __flush_tlb_all()						\
	do {								\
		if (cpu_has_pge)					\
			__flush_tlb_global();				\
		else							\
			__flush_tlb();					\
	} while (0)

#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)

#ifdef CONFIG_X86_INVLPG
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
#else
# define __flush_tlb_one(addr)						\
	do {								\
		if (cpu_has_invlpg)					\
			__flush_tlb_single(addr);			\
		else							\
			__flush_tlb();					\
	} while (0)
#endif
/*
 * Flush the entire TLB including global pages: momentarily clear
 * CR4.PGE, then restore it.  Writing CR4 with PGE toggled forces a
 * full TLB flush on restore.
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long orig_cr4 = read_cr4();

	write_cr4(orig_cr4 & ~X86_CR4_PGE);	/* turn PGE off */
	write_cr4(orig_cr4);			/* turn PGE back on, flushing TLBs */
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
}

/*
 * Flush everything: use the global-page variant when the CPU supports
 * PGE (plain CR3 reloads do not touch global entries), otherwise a
 * normal flush suffices.
 */
static inline void __flush_tlb_all(void)
{
	if (!cpu_has_pge) {
		__flush_tlb();
		return;
	}
	__flush_tlb_global();
}

/*
 * Flush one address: prefer a single-entry invalidation when invlpg is
 * available; fall back to a full flush on CPUs without it.
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (!cpu_has_invlpg) {
		__flush_tlb();
		return;
	}
	__flush_tlb_single(addr);
}

/*
 * TLB flushing:
@@ -86,11 +68,8 @@

#define TLB_FLUSH_ALL	0xffffffff


#ifndef CONFIG_SMP

#include <linux/sched.h>

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
@@ -116,7 +95,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
}

static inline void native_flush_tlb_others(const cpumask_t *cpumask,
					   struct mm_struct *mm, unsigned long va)
					   struct mm_struct *mm,
					   unsigned long va)
{
}

@@ -124,8 +104,7 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()
#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
@@ -134,7 +113,8 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
@@ -152,11 +132,11 @@ struct tlb_state
	char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)		\
	native_flush_tlb_others(&mask, mm, va)
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(&mask, mm, va)
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
@@ -165,4 +145,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
	flush_tlb_all();
}

#endif /* _I386_TLBFLUSH_H */
#endif /* _X86_TLBFLUSH_H */
Loading