Commit 5fb896a4 authored by Ingo Molnar


Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
parents d351c8db 2a7b8df0
--- a/arch/alpha/include/asm/hardirq.h
+++ b/arch/alpha/include/asm/hardirq.h
@@ -14,17 +14,4 @@ typedef struct {
 
 void ack_bad_irq(unsigned int irq);
 
-#define HARDIRQ_BITS	12
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially nestable IRQ sources in the system
- * to nest on a single CPU. On Alpha, interrupts are masked at the CPU
- * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode)
- * so we really only have 8 nestable IRQs, but allow some overhead
- */
-#if (1 << HARDIRQ_BITS) < 16
-#error HARDIRQ_BITS is too low!
-#endif
-
 #endif /* _ALPHA_HARDIRQ_H */
--- a/arch/avr32/include/asm/hardirq.h
+++ b/arch/avr32/include/asm/hardirq.h
@@ -20,15 +20,4 @@ void ack_bad_irq(unsigned int irq);
 
 #endif /* __ASSEMBLY__ */
 
-#define HARDIRQ_BITS	12
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #endif /* __ASM_AVR32_HARDIRQ_H */
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -20,16 +20,6 @@
 
 #define local_softirq_pending()	(local_cpu_data->softirq_pending)
 
-#define HARDIRQ_BITS	14
-
-/*
- * The hardirq mask has to be large enough to have space for potentially all IRQ sources
- * in the system nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,61 +15,61 @@
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
  *
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
  *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
+#define NMI_BITS	1
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS	12
-
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
-#endif
-
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
-#endif
+#define MAX_HARDIRQ_BITS 10
+
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
+#endif
+
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
+#endif
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
 
 #define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
 #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
 #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
 
-#define NMI_OFFSET	(PREEMPT_ACTIVE << 1)
-#if NMI_OFFSET >= 0x80000000
-#error PREEMPT_ACTIVE too high!
-#endif
-
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
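As a quick sanity check of the new layout, the following is a minimal user-space sketch (not kernel code; it simply re-uses the same names and formulas as the header in a standalone program) that recomputes the shifts and masks and verifies they match the hex values quoted in the comment above.

/*
 * Sketch only: recompute the new preempt_count layout in user space
 * with the same formulas as the patch. Not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	10	/* MAX_HARDIRQ_BITS after this change */
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)
#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

int main(void)
{
	/* The values quoted in the comment block of the patch. */
	assert(PREEMPT_MASK == 0x000000ffUL);
	assert(SOFTIRQ_MASK == 0x0000ff00UL);
	assert(HARDIRQ_MASK == 0x03ff0000UL);
	assert(NMI_MASK     == 0x04000000UL);

	printf("PREEMPT_MASK 0x%08lx\n", PREEMPT_MASK);
	printf("SOFTIRQ_MASK 0x%08lx\n", SOFTIRQ_MASK);
	printf("HARDIRQ_MASK 0x%08lx\n", HARDIRQ_MASK);
	printf("NMI_MASK     0x%08lx\n", NMI_MASK);
	return 0;
}

Compiled as a normal C program, the assertions pass: a 10-bit hardirq field at bits 16-25 and a single NMI bit at position 26.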
@@ -82,7 +82,7 @@
 /*
  * Are we in NMI context?
  */
-#define in_nmi()	(preempt_count() & NMI_OFFSET)
+#define in_nmi()	(preempt_count() & NMI_MASK)
 
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
@@ -175,24 +175,24 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
 #define nmi_enter()					\
 	do {						\
 		ftrace_nmi_enter();			\
 		BUG_ON(in_nmi());			\
-		add_preempt_count(NMI_OFFSET);		\
+		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		lockdep_off();				\
 		rcu_nmi_enter();			\
-		__irq_enter();				\
+		trace_hardirq_enter();			\
 	} while (0)
 
 #define nmi_exit()					\
 	do {						\
-		__irq_exit();				\
+		trace_hardirq_exit();			\
 		rcu_nmi_exit();				\
 		lockdep_on();				\
 		BUG_ON(!in_nmi());			\
-		sub_preempt_count(NMI_OFFSET);		\
+		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		ftrace_nmi_exit();			\
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
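To illustrate why nmi_enter() now adds NMI_OFFSET + HARDIRQ_OFFSET, here is a small user-space sketch. It is not kernel code: fake_preempt_count and the fake_nmi_* helpers are stand-ins invented for illustration, and the ftrace, lockdep and RCU hooks are omitted. It only shows that after entering, both an in_nmi() test and an in_irq()-style hardirq test observe the context, and that exiting restores the count.

/*
 * Sketch only: a plain variable stands in for the per-task
 * preempt_count, so the NMI accounting can be exercised in user space.
 */
#include <assert.h>

#define SOFTIRQ_SHIFT	8
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + 8)
#define NMI_SHIFT	(HARDIRQ_SHIFT + 10)

#define HARDIRQ_MASK	(((1UL << 10) - 1) << HARDIRQ_SHIFT)
#define NMI_MASK	(1UL << NMI_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

static unsigned long fake_preempt_count;	/* stands in for preempt_count() */

#define in_nmi()	(fake_preempt_count & NMI_MASK)
#define in_irq()	(fake_preempt_count & HARDIRQ_MASK)

static void fake_nmi_enter(void)
{
	/* Mirrors the accounting done by nmi_enter() in the patch above. */
	fake_preempt_count += NMI_OFFSET + HARDIRQ_OFFSET;
}

static void fake_nmi_exit(void)
{
	fake_preempt_count -= NMI_OFFSET + HARDIRQ_OFFSET;
}

int main(void)
{
	assert(!in_nmi() && !in_irq());

	fake_nmi_enter();
	assert(in_nmi());	/* bit 26 is set */
	assert(in_irq());	/* the hardirq count was bumped as well */

	fake_nmi_exit();
	assert(!in_nmi() && !in_irq());
	return 0;
}

This matches the macro change above: the hardirq increment is folded into the single add/sub together with the NMI bit, so the old __irq_enter()/__irq_exit() calls can be replaced by the bare trace_hardirq_enter()/trace_hardirq_exit() tracing hooks.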