
Commit 6dd01bed authored by Tejun Heo

x86: prepare for tlb merge



Impact: clean up, ipi vector number reordering for x86_32

Make the following changes to prepare for tlb merge.

* reorder x86_32 ipi vectors

* adjust tlb_32.c and tlb_64.c so that their logic coincides exactly
  (a sketch of the shared handler tail follows this list)
	- on a spurious invalidate ipi, tlb_32 now acks the irq
	- tlb_64 now has proper memory barriers around clearing
	  flush_cpumask (no change in generated code)

* unexport flush_tlb_page from tlb_32.c; it has no users

* use unsigned int for cpu id

* drop unnecessary includes from tlb_64.c
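
For illustration, a minimal sketch of the shared handler tail the two files converge on, assembled from the hunks below and abbreviated to the spurious-ipi path (not a complete, buildable handler):

	if (!cpumask_test_cpu(cpu, flush_cpumask))
		goto out;			/* spurious invalidate ipi */
	/* ... local tlb flush or leave_mm(cpu) elided ... */
out:
	ack_APIC_irq();				/* acked even when spurious */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, flush_cpumask);	/* signal the sender */
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);

The barriers guarantee the local flush is complete before the sender observes its bit clearing out of flush_cpumask.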

Signed-off-by: Tejun Heo <tj@kernel.org>
parent bdbcdd48
arch/x86/include/asm/irq_vectors.h  +16 −17

@@ -49,18 +49,17 @@
  *  some of the following vectors are 'rare', they are merged
  *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
  */
 #ifdef CONFIG_X86_32
 
 # define SPURIOUS_APIC_VECTOR		0xff
 # define ERROR_APIC_VECTOR		0xfe
-# define INVALIDATE_TLB_VECTOR		0xfd
-# define RESCHEDULE_VECTOR		0xfc
-# define CALL_FUNCTION_VECTOR		0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR	0xfa
-# define THERMAL_APIC_VECTOR		0xf0
+# define RESCHEDULE_VECTOR		0xfd
+# define CALL_FUNCTION_VECTOR		0xfc
+# define CALL_FUNCTION_SINGLE_VECTOR	0xfb
+# define THERMAL_APIC_VECTOR		0xfa
+/* 0xf1 - 0xf9 : free */
+# define INVALIDATE_TLB_VECTOR		0xf0
 
 #else
 
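The reordering moves the lone 32-bit invalidate vector down to 0xf0 and leaves 0xf1-0xf9 free, matching how the 64-bit side already lays out its flush vectors. For context, a sketch of the x86_64 definitions being aligned with, quoted from memory of the era's header rather than from this hunk, so treat the exact values as assumptions:

/* Assumed x86_64 layout at the time (not part of this hunk): the tlb
 * merge can later give 32-bit the same eight-vector flush range. */
#define INVALIDATE_TLB_VECTOR_END	0xf7
#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS	8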
arch/x86/kernel/tlb_32.c  +5 −5

@@ -84,13 +84,15 @@ EXPORT_SYMBOL_GPL(leave_mm);
  *
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
  */
 
 void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	unsigned long cpu;
+	unsigned int cpu;
 
-	cpu = get_cpu();
+	cpu = smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, flush_cpumask))
 		goto out;
@@ -112,12 +114,11 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 		} else
 			leave_mm(cpu);
 	}
+out:
 	ack_APIC_irq();
 	smp_mb__before_clear_bit();
 	cpumask_clear_cpu(cpu, flush_cpumask);
 	smp_mb__after_clear_bit();
-out:
-	put_cpu_no_resched();
 	inc_irq_stat(irq_tlb_count);
 }

@@ -215,7 +216,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_page);
 
 static void do_flush_tlb_all(void *info)
 {
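
Dropping EXPORT_SYMBOL(flush_tlb_page) only removes the modular entry point; built-in code keeps calling the function unchanged. A minimal, hypothetical sketch of such a built-in caller (the function name is invented for illustration):

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical built-in caller: flush_tlb_page() stays visible to core
 * kernel code, it just can no longer be used from modules. */
static void example_flush_user_page(struct vm_area_struct *vma,
				    unsigned long va)
{
	flush_tlb_page(vma, va);	/* flush one page on all cpus using vma->vm_mm */
}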
arch/x86/kernel/tlb_64.c  +7 −11
 #include <linux/init.h>
 
 #include <linux/mm.h>
-#include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
+#include <asm/apic.h>
 #include <asm/uv/uv.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
@@ -121,8 +115,8 @@ EXPORT_SYMBOL_GPL(leave_mm);
 
 asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	int cpu;
-	int sender;
+	unsigned int cpu;
+	unsigned int sender;
 	union smp_flush_state *f;
 
 	cpu = smp_processor_id();
@@ -155,14 +149,16 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 out:
 	ack_APIC_irq();
+	smp_mb__before_clear_bit();
 	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
+	smp_mb__after_clear_bit();
 	inc_irq_stat(irq_tlb_count);
 }
 
 static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 				 struct mm_struct *mm, unsigned long va)
 {
-	int sender;
+	unsigned int sender;
 	union smp_flush_state *f;
 
 	/* Caller has disabled preemption */
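
The hunk ends just as flush_tlb_others_ipi picks its per-sender flush slot. For context, a short sketch of that selection, reconstructed from the surrounding tlb_64.c of the period rather than taken from this diff, so the exact statements are assumptions:

	/* Assumed continuation (not part of this diff): each sending cpu
	 * hashes onto one of NUM_INVALIDATE_TLB_VECTORS flush slots, so
	 * several flush ipis can be in flight at once, each acknowledged
	 * through its own f->flush_cpumask. */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &flush_state[sender];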