Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a3288106 authored by Yong Wang, committed by Ingo Molnar
Browse files

perf_counter/x86: Remove the IRQ (non-NMI) handling bits



Remove the IRQ (non-NMI) handling bits as NMI will be used always.

Signed-off-by: Yong Wang <yong.y.wang@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090603051255.GA2791@ywang-moblin2.bj.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent addc2785
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -49,7 +49,6 @@ BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)

#ifdef CONFIG_PERF_COUNTERS
BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
#endif

+0 −1
Original line number Diff line number Diff line
@@ -29,7 +29,6 @@
extern void apic_timer_interrupt(void);
extern void generic_interrupt(void);
extern void error_interrupt(void);
extern void perf_counter_interrupt(void);
extern void perf_pending_interrupt(void);

extern void spurious_interrupt(void);
+0 −5
Original line number Diff line number Diff line
@@ -106,11 +106,6 @@
 */
#define LOCAL_TIMER_VECTOR		0xef

/*
 * Performance monitoring interrupt vector:
 */
#define LOCAL_PERF_VECTOR		0xee

/*
 * Generic system vector for platform specific use
 */
+6 −15
Original line number Diff line number Diff line
@@ -40,7 +40,7 @@ struct cpu_hw_counters {
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *, int);
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
@@ -755,7 +755,7 @@ static void intel_pmu_reset(void)
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	struct cpu_hw_counters;
@@ -794,7 +794,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
		if (!intel_pmu_save_and_restart(counter))
			continue;

		if (perf_counter_overflow(counter, nmi, regs, 0))
		if (perf_counter_overflow(counter, 1, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

@@ -812,7 +812,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
	return 1;
}

static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int cpu, idx, handled = 0;
	struct cpu_hw_counters *cpuc;
@@ -840,22 +840,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, nmi, regs, 0))
		if (perf_counter_overflow(counter, 1, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}

/*
 * IRQ (non-NMI) entry point for the performance counter interrupt,
 * reached via the LOCAL_PERF_VECTOR apicinterrupt stub.  This commit
 * deletes it, since the NMI path is used unconditionally from now on.
 */
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	/*
	 * Rewrite the LVT performance counter entry with our vector;
	 * presumably this also re-arms/unmasks the entry after delivery —
	 * TODO confirm against the local APIC LVTPC semantics.
	 */
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	/* Second argument 0 == "not invoked from NMI context". */
	x86_pmu.handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
@@ -910,7 +901,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs, 1);
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}
+0 −2
Original line number Diff line number Diff line
@@ -1026,8 +1026,6 @@ apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_PERF_COUNTERS
apicinterrupt LOCAL_PERF_VECTOR \
	perf_counter_interrupt smp_perf_counter_interrupt
apicinterrupt LOCAL_PENDING_VECTOR \
	perf_pending_interrupt smp_perf_pending_interrupt
#endif
Loading