Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 85b77cdd authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

x86/smp: Remove pointless duplicated interrupt code



Two NOP5s are really a good tradeoff vs. the unholy IDT switching mess,
which duplicates code all over the place. The rescheduling interrupt gets
optimized in a later step.

Make the ordering of function call and statistics increment the same as in
other places. Calculate stats first, then do the function call.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170828064957.222101344@linutronix.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0f42ae28
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -49,8 +49,8 @@ extern asmlinkage void call_function_single_interrupt(void);
#ifdef CONFIG_TRACING
/* Interrupt handlers registered during init_IRQ */
extern void trace_reschedule_interrupt(void);
extern void trace_call_function_interrupt(void);
extern void trace_call_function_single_interrupt(void);
#define trace_call_function_interrupt	call_function_interrupt
#define trace_call_function_single_interrupt	call_function_single_interrupt
#define trace_thermal_interrupt	thermal_interrupt
#define trace_threshold_interrupt	threshold_interrupt
#define trace_deferred_error_interrupt	deferred_error_interrupt
+7 −36
Original line number Diff line number Diff line
@@ -281,57 +281,28 @@ __visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
	 */
	ipi_entering_ack_irq();
	trace_reschedule_entry(RESCHEDULE_VECTOR);
	__smp_reschedule_interrupt();
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
	trace_reschedule_exit(RESCHEDULE_VECTOR);
	exiting_irq();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}

static inline void __smp_call_function_interrupt(void)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
}

__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_interrupt();
	exiting_irq();
}

__visible void __irq_entry
smp_trace_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	__smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

static inline void __smp_call_function_single_interrupt(void)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
}

__visible void __irq_entry
smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

__visible void __irq_entry
smp_trace_call_function_single_interrupt(struct pt_regs *regs)
__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	__smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}