Commit bed4f130 authored by Ingo Molnar

Merge branch 'x86/irq' into x86/core

parents 3e5621ed bf8bd66d
@@ -6,10 +6,10 @@
 #endif

 /*
-   Macros for dwarf2 CFI unwind table entries.
-   See "as.info" for details on these pseudo ops. Unfortunately
-   they are only supported in very new binutils, so define them
-   away for older version.
+ * Macros for dwarf2 CFI unwind table entries.
+ * See "as.info" for details on these pseudo ops. Unfortunately
+ * they are only supported in very new binutils, so define them
+ * away for older version.
  */

 #ifdef CONFIG_AS_CFI
@@ -36,8 +36,10 @@
 #else

-/* Due to the structure of pre-exisiting code, don't use assembler line
-   comment character # to ignore the arguments. Instead, use a dummy macro. */
+/*
+ * Due to the structure of pre-exisiting code, don't use assembler line
+ * comment character # to ignore the arguments. Instead, use a dummy macro.
+ */
 .macro cfi_ignore a=0, b=0, c=0, d=0
 .endm
@@ -58,4 +60,37 @@
 #endif

+/*
+ * An attempt to make CFI annotations more or less
+ * correct and shorter. It is implied that you know
+ * what you're doing if you use them.
+ */
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_X86_64
+	.macro pushq_cfi reg
+	pushq \reg
+	CFI_ADJUST_CFA_OFFSET 8
+	.endm
+
+	.macro popq_cfi reg
+	popq \reg
+	CFI_ADJUST_CFA_OFFSET -8
+	.endm
+
+	.macro movq_cfi reg offset=0
+	movq %\reg, \offset(%rsp)
+	CFI_REL_OFFSET \reg, \offset
+	.endm
+
+	.macro movq_cfi_restore offset reg
+	movq \offset(%rsp), %\reg
+	CFI_RESTORE \reg
+	.endm
+#else /*!CONFIG_X86_64*/
+
+/* 32bit defenitions are missed yet */
+
+#endif /*!CONFIG_X86_64*/
+#endif /*__ASSEMBLY__*/
+
 #endif /* _ASM_X86_DWARF2_H */
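Note: a minimal usage sketch for the new helpers, assuming CONFIG_AS_CFI and a 64-bit build; the function name save_two_regs is hypothetical and not part of this commit:

	/* hypothetical .S fragment; <asm/dwarf2.h> provides the macros */
	ENTRY(save_two_regs)
		CFI_STARTPROC
		pushq_cfi %rbx		/* pushq + CFI_ADJUST_CFA_OFFSET 8 */
		pushq_cfi %r12
		/* ... work with %rbx/%r12 ... */
		popq_cfi %r12		/* popq + CFI_ADJUST_CFA_OFFSET -8 */
		popq_cfi %rbx
		ret
		CFI_ENDPROC
	END(save_two_regs)

Each helper folds the stack operation and its CFI bookkeeping into one line, which is the whole point of the block above.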
@@ -22,6 +22,8 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)

+#define inc_irq_stat(member)	(__get_cpu_var(irq_stat).member++)
+
 void ack_bad_irq(unsigned int irq);
 #include <linux/irq_cpustat.h>
...
@@ -11,6 +11,8 @@
 #define __ARCH_IRQ_STAT 1

+#define inc_irq_stat(member)	add_pda(member, 1)
+
 #define local_softirq_pending()  read_pda(__softirq_pending)

 #define __ARCH_SET_SOFTIRQ_PENDING 1
...
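Note: these two definitions give every handler one spelling for bumping a per-CPU interrupt counter; 32-bit resolves it through the irq_stat per-CPU structure, 64-bit through the PDA. A hedged sketch of a call site (the handler itself is illustrative, not from this commit):

	/* illustrative only */
	static void example_ipi_handler(void)
	{
		/* 32-bit: __get_cpu_var(irq_stat).irq_call_count++;
		 * 64-bit: add_pda(irq_call_count, 1); */
		inc_irq_stat(irq_call_count);
	}

The bulk of this merge is the mechanical conversion of open-coded #ifdef pairs into exactly this call, as the hunks below show.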
@@ -109,9 +109,7 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 #endif

-#ifdef CONFIG_X86_32
-extern void (*const interrupt[NR_VECTORS])(void);
-#endif
+extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);

 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
...
@@ -57,5 +57,65 @@
 #define __ALIGN_STR ".align 16,0x90"
 #endif

+/*
+ * to check ENTRY_X86/END_X86 and
+ * KPROBE_ENTRY_X86/KPROBE_END_X86
+ * unbalanced-missed-mixed appearance
+ */
+#define __set_entry_x86		.set ENTRY_X86_IN, 0
+#define __unset_entry_x86	.set ENTRY_X86_IN, 1
+#define __set_kprobe_x86	.set KPROBE_X86_IN, 0
+#define __unset_kprobe_x86	.set KPROBE_X86_IN, 1
+
+#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
+
+#define __check_entry_x86	\
+	.ifdef ENTRY_X86_IN;	\
+	.ifeq ENTRY_X86_IN;	\
+	__macro_err_x86;	\
+	.abort;			\
+	.endif;			\
+	.endif
+
+#define __check_kprobe_x86	\
+	.ifdef KPROBE_X86_IN;	\
+	.ifeq KPROBE_X86_IN;	\
+	__macro_err_x86;	\
+	.abort;			\
+	.endif;			\
+	.endif
+
+#define __check_entry_kprobe_x86	\
+	__check_entry_x86;		\
+	__check_kprobe_x86
+
+#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
+
+#define ENTRY_X86(name)			\
+	__check_entry_kprobe_x86;	\
+	__set_entry_x86;		\
+	.globl name;			\
+	__ALIGN;			\
+	name:
+
+#define END_X86(name)			\
+	__unset_entry_x86;		\
+	__check_entry_kprobe_x86;	\
+	.size name, .-name
+
+#define KPROBE_ENTRY_X86(name)		\
+	__check_entry_kprobe_x86;	\
+	__set_kprobe_x86;		\
+	.pushsection .kprobes.text, "ax"; \
+	.globl name;			\
+	__ALIGN;			\
+	name:
+
+#define KPROBE_END_X86(name)		\
+	__unset_kprobe_x86;		\
+	__check_entry_kprobe_x86;	\
+	.size name, .-name;		\
+	.popsection
+
 #endif /* _ASM_X86_LINKAGE_H */
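Note: a minimal sketch of the intended pairing; the symbol names are hypothetical. ENTRY_X86 sets ENTRY_X86_IN to 0 and must be closed by END_X86 (which sets it back to 1); likewise for the KPROBE pair. Any nesting, mixing, or missing END trips __macro_err_x86 at the next checkpoint:

	ENTRY_X86(my_stub)		/* ENTRY_X86_IN := 0 */
		ret
	END_X86(my_stub)		/* ENTRY_X86_IN := 1, emits .size */

	KPROBE_ENTRY_X86(my_probe_stub)	/* also switches to .kprobes.text */
		ret
	KPROBE_END_X86(my_probe_stub)	/* .size + .popsection */

	ENTRY_KPROBE_FINAL_X86		/* end-of-file check; .error + .abort
					 * if either pair is still open */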
@@ -777,11 +777,7 @@ static void local_apic_timer_interrupt(void)
 	/*
 	 * the NMI deadlock-detector uses this.
 	 */
-#ifdef CONFIG_X86_64
-	add_pda(apic_timer_irqs, 1);
-#else
-	per_cpu(irq_stat, cpu).apic_timer_irqs++;
-#endif
+	inc_irq_stat(apic_timer_irqs);

 	evt->event_handler(evt);
 }
@@ -1677,14 +1673,11 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
 		ack_APIC_irq();

-#ifdef CONFIG_X86_64
-	add_pda(irq_spurious_count, 1);
-#else
+	inc_irq_stat(irq_spurious_count);
+
 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
 	pr_info("spurious APIC interrupt on CPU#%d, "
 		"should never happen.\n", smp_processor_id());
-	__get_cpu_var(irq_stat).irq_spurious_count++;
-#endif
 	irq_exit();
 }
...
@@ -237,7 +237,7 @@ asmlinkage void mce_threshold_interrupt(void)
 		}
 	}
 out:
-	add_pda(irq_threshold_count, 1);
+	inc_irq_stat(irq_threshold_count);
 	irq_exit();
 }
...
@@ -26,7 +26,7 @@ asmlinkage void smp_thermal_interrupt(void)
 	if (therm_throt_process(msr_val & 1))
 		mce_log_therm_throt_event(smp_processor_id(), msr_val);

-	add_pda(irq_thermal_count, 1);
+	inc_irq_stat(irq_thermal_count);
 	irq_exit();
 }
...
(Two file diffs are collapsed and not shown here.)
@@ -129,7 +129,7 @@ void __init native_init_IRQ(void)
 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* SYSCALL_VECTOR was reserved in trap_init. */
 		if (i != SYSCALL_VECTOR)
-			set_intr_gate(i, interrupt[i]);
+			set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
 	}
...
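Note: interrupt[] no longer starts at vector 0, so every lookup must subtract the array base. With FIRST_EXTERNAL_VECTOR == 0x20 (the usual x86 value), vector 0x20 maps to interrupt[0], 0x21 to interrupt[1], and so on:

	/* illustrative: setting up the gate for an external vector v */
	set_intr_gate(v, interrupt[v - FIRST_EXTERNAL_VECTOR]);

The same adjustment appears in the lguest hunk further down.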
@@ -23,41 +23,6 @@
 #include <asm/apic.h>
 #include <asm/i8259.h>

-/*
- * Common place to define all x86 IRQ vectors
- *
- * This builds up the IRQ handler stubs using some ugly macros in irq.h
- *
- * These macros create the low-level assembly IRQ routines that save
- * register context and call do_IRQ(). do_IRQ() then does all the
- * operations that are needed to keep the AT (or SMP IOAPIC)
- * interrupt-controller happy.
- */
-
-#define IRQ_NAME2(nr) nr##_interrupt(void)
-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-
-/*
- * SMP has a few special interrupts for IPI messages
- */
-
-#define BUILD_IRQ(nr)				\
-	asmlinkage void IRQ_NAME(nr);		\
-	asm("\n.text\n.p2align\n"		\
-	    "IRQ" #nr "_interrupt:\n\t"		\
-	    "push $~(" #nr ") ; "		\
-	    "jmp common_interrupt\n"		\
-	    ".previous");
-
-#define BI(x,y) \
-	BUILD_IRQ(x##y)
-
-#define BUILD_16_IRQS(x) \
-	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
-	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
-	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
-	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
-
 /*
  * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
  * (these are usually mapped to vectors 0x30-0x3f)
@@ -73,37 +38,6 @@
  *
  * (these are usually mapped into the 0x30-0xff vector range)
  */
-
-BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
-BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
-BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
-BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
-
-#undef BUILD_16_IRQS
-#undef BI
-
-#define IRQ(x,y) \
-	IRQ##x##y##_interrupt
-
-#define IRQLIST_16(x) \
-	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3),	\
-	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7),	\
-	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b),	\
-	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-
-/* for the irq vectors */
-static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
-	IRQLIST_16(0x2), IRQLIST_16(0x3),
-	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
-	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
-	IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
-};
-
-#undef IRQ
-#undef IRQLIST_16
-
 /*
  * IRQ2 is cascade interrupt to second interrupt controller
...
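Note: for reference, the deleted BUILD_IRQ machinery generated one push-and-jump stub per vector. Expanding BUILD_IRQ(0x20) by hand (derived directly from the removed macro, shown only as illustration) gives roughly:

	asmlinkage void IRQ0x20_interrupt(void);
	asm("\n.text\n.p2align\n"
	    "IRQ0x20_interrupt:\n\t"
	    "push $~(0x20) ; "
	    "jmp common_interrupt\n"
	    ".previous");

The stubs and the interrupt[] array itself now live elsewhere, presumably in one of the collapsed diffs above, matching the new __initconst declaration in hw_irq.h.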
@@ -165,11 +165,7 @@ static void native_smp_send_stop(void)
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_resched_count++;
-#else
-	add_pda(irq_resched_count, 1);
-#endif
+	inc_irq_stat(irq_resched_count);
 }

 void smp_call_function_interrupt(struct pt_regs *regs)
@@ -177,11 +173,7 @@ void smp_call_function_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 	irq_enter();
 	generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 }
@@ -190,11 +182,7 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 }
...
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(profile_pc);
 irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 	/* Keep nmi watchdog up to date */
-	per_cpu(irq_stat, smp_processor_id()).irq0_irqs++;
+	inc_irq_stat(irq0_irqs);

 #ifdef CONFIG_X86_IO_APIC
 	if (timer_ack) {
...
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(profile_pc);
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	add_pda(irq0_irqs, 1);
+	inc_irq_stat(irq0_irqs);

 	global_clock_event->event_handler(global_clock_event);
...
@@ -118,7 +118,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
-	__get_cpu_var(irq_stat).irq_tlb_count++;
+	inc_irq_stat(irq_tlb_count);
 }

 void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
...
@@ -154,7 +154,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 out:
 	ack_APIC_irq();
 	cpu_clear(cpu, f->flush_cpumask);
-	add_pda(irq_tlb_count, 1);
+	inc_irq_stat(irq_tlb_count);
 }

 void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
...
@@ -481,11 +481,7 @@ do_nmi(struct pt_regs *regs, long error_code)
 {
 	nmi_enter();

-#ifdef CONFIG_X86_32
-	{ int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
-#else
-	add_pda(__nmi_count, 1);
-#endif
+	inc_irq_stat(__nmi_count);

 	if (!ignore_nmis)
 		default_do_nmi(regs);
...
@@ -590,7 +590,8 @@ static void __init lguest_init_IRQ(void)
 	 * a straightforward 1 to 1 mapping, so force that here. */
 	__get_cpu_var(vector_irq)[vector] = i;
 	if (vector != SYSCALL_VECTOR) {
-		set_intr_gate(vector, interrupt[vector]);
+		set_intr_gate(vector,
+			      interrupt[vector-FIRST_EXTERNAL_VECTOR]);
 		set_irq_chip_and_handler_name(i, &lguest_irq_controller,
 					      handle_level_irq,
 					      "level");
...
@@ -64,14 +64,6 @@
 name:
 #endif

-#define KPROBE_ENTRY(name) \
-  .pushsection .kprobes.text, "ax"; \
-  ENTRY(name)
-
-#define KPROBE_END(name) \
-  END(name); \
-  .popsection
-
 #ifndef END
 #define END(name) \
 	.size name, .-name
...