Commit 85b77cdd authored by Thomas Gleixner, committed by Ingo Molnar

x86/smp: Remove pointless duplicated interrupt code

Two NOP5s are really a good tradeoff vs. the unholy IDT switching mess,
which duplicates code all over the place. The rescheduling interrupt gets
optimized in a later step.
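
The two NOP5s come from the call function entry/exit tracepoints which are
now compiled into the regular handlers unconditionally: a disabled
tracepoint is a static-key branch that gets patched to a 5-byte NOP, so
keeping the trace hooks in the hot path costs two NOPs. A minimal sketch of
that mechanism using the generic jump-label API (trace_key, maybe_trace and
do_trace_slowpath are illustrative names, not the actual tracepoint
internals):

	#include <linux/jump_label.h>

	/* Illustrative only: one NOP5 in the fast path while disabled */
	DEFINE_STATIC_KEY_FALSE(trace_key);

	static void do_trace_slowpath(void) { }	/* stand-in for trace work */

	static inline void maybe_trace(void)
	{
		/*
		 * static_branch_unlikely() emits a 5-byte NOP which is
		 * live-patched to a jump when the key is enabled, so the
		 * disabled case executes a NOP instead of a test and branch.
		 */
		if (static_branch_unlikely(&trace_key))
			do_trace_slowpath();
	}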

Make the ordering of function call and statistics increment the same as in
other places. Calculate stats first, then do the function call.
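
Concretely, in the consolidated handlers below the old helper sequence

	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);

becomes

	inc_irq_stat(irq_call_count);
	generic_smp_call_function_interrupt();

which matches the order used by the other interrupt handlers.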
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170828064957.222101344@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0f42ae28
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -49,8 +49,8 @@ extern asmlinkage void call_function_single_interrupt(void);
 #ifdef CONFIG_TRACING
 /* Interrupt handlers registered during init_IRQ */
 extern void trace_reschedule_interrupt(void);
-extern void trace_call_function_interrupt(void);
-extern void trace_call_function_single_interrupt(void);
+#define trace_call_function_interrupt call_function_interrupt
+#define trace_call_function_single_interrupt call_function_single_interrupt
 #define trace_thermal_interrupt thermal_interrupt
 #define trace_threshold_interrupt threshold_interrupt
 #define trace_deferred_error_interrupt deferred_error_interrupt
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -281,57 +281,28 @@ __visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
 	 */
 	ipi_entering_ack_irq();
 	trace_reschedule_entry(RESCHEDULE_VECTOR);
-	__smp_reschedule_interrupt();
+	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 	trace_reschedule_exit(RESCHEDULE_VECTOR);
 	exiting_irq();
-	/*
-	 * KVM uses this interrupt to force a cpu out of guest mode
-	 */
-}
-
-static inline void __smp_call_function_interrupt(void)
-{
-	generic_smp_call_function_interrupt();
-	inc_irq_stat(irq_call_count);
 }
 
 __visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
-{
-	ipi_entering_ack_irq();
-	__smp_call_function_interrupt();
-	exiting_irq();
-}
-
-__visible void __irq_entry
-smp_trace_call_function_interrupt(struct pt_regs *regs)
 {
 	ipi_entering_ack_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
-	__smp_call_function_interrupt();
-	trace_call_function_exit(CALL_FUNCTION_VECTOR);
-	exiting_irq();
-}
-
-static inline void __smp_call_function_single_interrupt(void)
-{
-	generic_smp_call_function_single_interrupt();
 	inc_irq_stat(irq_call_count);
-}
-
-__visible void __irq_entry
-smp_call_function_single_interrupt(struct pt_regs *regs)
-{
-	ipi_entering_ack_irq();
-	__smp_call_function_single_interrupt();
+	generic_smp_call_function_interrupt();
+	trace_call_function_exit(CALL_FUNCTION_VECTOR);
 	exiting_irq();
 }
 
-__visible void __irq_entry
-smp_trace_call_function_single_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
 {
 	ipi_entering_ack_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
-	__smp_call_function_single_interrupt();
+	inc_irq_stat(irq_call_count);
+	generic_smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
 	exiting_irq();
 }