Commit b8d99b4c authored by Russell King

[ARM] Update ARM IRQ code for 2.5.28 global IRQ changes

irq_enter/irq_exit no longer take arguments.  We also use the
x86 methods for hardirq.h and softirq.h; they're sufficient
for ARM.
parent 07ca706f
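For context, a minimal user-space sketch of why the arguments could be dropped (the counters and macros below are toy stand-ins, not kernel code): the old ARM macros bumped a slot in a per-CPU counter array, so callers had to pass the cpu, and the irq argument was unused even then; the new x86-style scheme folds the hardirq count into the per-thread preemption counter, which needs no parameters at all.

    /* Toy model: the old macros index a per-CPU array, so they need
     * the cpu argument (irq was unused even then); the new ones only
     * touch the current thread's preemption count. */
    #include <assert.h>

    #define HARDIRQ_SHIFT	16
    #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

    static unsigned long preempt_count;	/* stand-in for preempt_count() */
    static unsigned int local_irq_count[2];	/* stand-in for the old per-CPU array */

    #define old_irq_enter(cpu, irq)	(local_irq_count[cpu]++)
    #define old_irq_exit(cpu, irq)	(local_irq_count[cpu]--)

    #define irq_enter()	(preempt_count += HARDIRQ_OFFSET)
    #define irq_exit()	(preempt_count -= HARDIRQ_OFFSET)	/* the real one also kicks softirqs */

    int main(void)
    {
    	old_irq_enter(0, 23);			/* caller must know the cpu */
    	assert(local_irq_count[0] == 1);
    	old_irq_exit(0, 23);

    	irq_enter();				/* no arguments needed */
    	assert(preempt_count == HARDIRQ_OFFSET);
    	irq_exit();
    	assert(preempt_count == 0);
    	return 0;
    }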
arch/arm/kernel/irq.c
@@ -217,14 +217,14 @@ do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 	desc->triggered = 1;
 
-	irq_enter(cpu, irq);
+	irq_enter();
 	kstat.irqs[cpu][irq]++;
 
 	action = desc->action;
 	if (action)
 		__do_irq(irq, desc->action, regs);
 
-	irq_exit(cpu, irq);
+	irq_exit();
 }
 
 /*
@@ -256,7 +256,7 @@ do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 	 */
 	desc->running = 1;
 
-	irq_enter(cpu, irq);
+	irq_enter();
 	kstat.irqs[cpu][irq]++;
 
 	do {
@@ -274,7 +274,7 @@ do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 			__do_irq(irq, action, regs);
 	} while (desc->pending);
 
-	irq_exit(cpu, irq);
+	irq_exit();
 
 	desc->running = 0;
@@ -311,7 +311,7 @@ do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 	desc->chip->ack(irq);
 
 	if (likely(desc->enabled)) {
-		irq_enter(cpu, irq);
+		irq_enter();
 		kstat.irqs[cpu][irq]++;
 
 		/*
@@ -325,7 +325,7 @@ do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 			   !check_irq_lock(desc, irq, regs)))
 				desc->chip->unmask(irq);
 		}
-		irq_exit(cpu, irq);
+		irq_exit();
 	}
 }
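All three flow handlers above now follow the same shape: mark the descriptor state, bracket the handler invocation with the argument-less irq_enter()/irq_exit(), and keep the kstat accounting, which still indexes by cpu and is why smp_processor_id() survives in these functions. A stripped-down sketch of that shape, with toy types standing in for the kernel's struct irqdesc:

    static unsigned long preempt_count;		/* toy preempt_count() */
    #define HARDIRQ_OFFSET	(1UL << 16)
    #define irq_enter()	(preempt_count += HARDIRQ_OFFSET)
    #define irq_exit()	(preempt_count -= HARDIRQ_OFFSET)

    struct toy_desc {
    	void (*action)(int irq);		/* stand-in for desc->action */
    	int triggered;
    };

    static unsigned int kstat_irqs[1][32];	/* [cpu][irq]: statistics stay per-CPU */

    static void toy_do_simple_irq(int cpu, int irq, struct toy_desc *desc)
    {
    	desc->triggered = 1;
    	irq_enter();				/* no (cpu, irq) arguments */
    	kstat_irqs[cpu][irq]++;
    	if (desc->action)
    		desc->action(irq);
    	irq_exit();
    }

    static void noop(int irq) { (void)irq; }

    int main(void)
    {
    	struct toy_desc d = { .action = noop };
    	toy_do_simple_irq(0, 5, &d);
    	return 0;
    }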
include/asm-arm/hardirq.h
@@ -17,27 +17,70 @@ typedef struct {
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
 /*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ * - bit 26 is the PREEMPT_ACTIVE flag
  */
-#define in_interrupt() ({ const int __cpu = smp_processor_id(); \
-		(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	8
 
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
 
-#ifndef CONFIG_SMP
+#define __MASK(x)	((1UL << (x))-1)
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK|SOFTIRQ_MASK))
 
-#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu)	do { } while (0)
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 
-#define irq_enter(cpu,irq)	(local_irq_count(cpu)++)
-#define irq_exit(cpu,irq)	(local_irq_count(cpu)--)
+/*
+ * The hardirq mask has to be large enough to have space
+ * for potentially all IRQ sources in the system nesting
+ * on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
 
-#define synchronize_irq()	do { } while (0)
-#define release_irqlock(cpu)	do { } while (0)
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
+
+#define hardirq_trylock()	(!in_interrupt())
+#define hardirq_endlock()	do { } while (0)
+
+#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
+
+#ifndef CONFIG_SMP
+
+#define irq_exit()							\
+do {									\
+	preempt_count() -= HARDIRQ_OFFSET;				\
+	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
+		__asm__("bl%? __do_softirq": : : "lr");	/* out of line */\
+	preempt_enable_no_resched();					\
+} while (0)
+
+#define synchronize_irq(irq)	barrier()
 
 #else
 #error SMP not supported
 #endif /* CONFIG_SMP */
 
-#endif
+#endif /* __ASM_HARDIRQ_H */
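The bitmask layout in the new hardirq.h comment can be checked directly: with 8 bits each, the preemption count occupies 0x000000ff, the softirq count 0x0000ff00, and the hardirq count 0x00ff0000, and the in_irq()/in_softirq()/in_interrupt() predicates reduce to mask tests. A standalone sketch using the same definitions, with preempt_count modelled as a plain variable rather than the kernel's per-thread field:

    #include <assert.h>
    #include <stdio.h>

    #define PREEMPT_BITS	8
    #define SOFTIRQ_BITS	8
    #define HARDIRQ_BITS	8

    #define PREEMPT_SHIFT	0
    #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

    #define __MASK(x)	((1UL << (x))-1)
    #define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
    #define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
    #define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

    #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
    #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

    static unsigned long preempt_count;	/* stand-in for the per-thread count */

    int main(void)
    {
    	/* bits 0-7, 8-15, 16-23, exactly as the comment describes */
    	assert(PREEMPT_MASK == 0x000000ffUL);
    	assert(SOFTIRQ_MASK == 0x0000ff00UL);
    	assert(HARDIRQ_MASK == 0x00ff0000UL);

    	/* one hardirq nesting inside a softirq */
    	preempt_count += SOFTIRQ_OFFSET;	/* enter softirq */
    	preempt_count += HARDIRQ_OFFSET;	/* irq_enter() */

    	printf("in_irq=%lu in_softirq=%lu in_interrupt=%lu\n",
    	       preempt_count & HARDIRQ_MASK,
    	       preempt_count & SOFTIRQ_MASK,
    	       preempt_count & (HARDIRQ_MASK|SOFTIRQ_MASK));
    	return 0;
    }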
include/asm-arm/softirq.h
 #ifndef __ASM_SOFTIRQ_H
 #define __ASM_SOFTIRQ_H
 
-#include <asm/atomic.h>
+#include <linux/preempt.h>
 #include <asm/hardirq.h>
 
-#define __cpu_bh_enable(cpu) \
-		do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
-#define cpu_bh_disable(cpu) \
-		do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
+#define local_bh_disable() \
+		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
 
-#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
-#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
-
-#define in_softirq()		(local_bh_count(smp_processor_id()) != 0)
-
-#define _local_bh_enable()						\
+#define local_bh_enable()						\
 do {									\
-	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
-	if (!--*ptr && ptr[-2])						\
+	__local_bh_enable();						\
+	if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
 		__asm__("bl%? __do_softirq": : : "lr");	/* out of line */\
+	preempt_check_resched();					\
 } while (0)
 
-#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
-
 #endif /* __ASM_SOFTIRQ_H */
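Under the same scheme, the bottom-half primitives become arithmetic on the softirq byte of the counter, so disables nest naturally and only the final enable is the point where pending softirqs may run. A toy model of that pairing (the out-of-line __do_softirq branch is replaced by a stub, and all names here are illustrative):

    #include <assert.h>

    #define SOFTIRQ_SHIFT	8
    #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
    #define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)
    #define HARDIRQ_MASK	(0xffUL << 16)

    static unsigned long preempt_count;	/* toy preempt_count() */
    static unsigned long pending;		/* stand-in for softirq_pending() */

    #define in_interrupt()	(preempt_count & (HARDIRQ_MASK|SOFTIRQ_MASK))

    static void do_softirq_stub(void) { pending = 0; }

    /* Same structure as the new local_bh_enable(): drop the count first,
     * then run softirqs only if we are no longer in interrupt context. */
    static void toy_local_bh_disable(void) { preempt_count += SOFTIRQ_OFFSET; }
    static void toy_local_bh_enable(void)
    {
    	preempt_count -= SOFTIRQ_OFFSET;
    	if (!in_interrupt() && pending)
    		do_softirq_stub();
    }

    int main(void)
    {
    	pending = 1;
    	toy_local_bh_disable();
    	toy_local_bh_disable();		/* nests: it is a count, not a flag */
    	toy_local_bh_enable();
    	assert(pending == 1);		/* still disabled, softirqs deferred */
    	toy_local_bh_enable();
    	assert(pending == 0);		/* last enable ran pending softirqs */
    	return 0;
    }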