Commit 3e37014c authored by Geert Uytterhoeven, committed by Linus Torvalds

[PATCH] M68k irq updates

M68k irq updates (most from Roman Zippel):
  - Update m68k to new irq API in 2.5.29
  - synchronize_irq() takes a parameter now
  - Remove obsolete definitions
  - Avoid too much inline assembly in __xchg
  - Add irqs_disabled(), which appeared in 2.5.34
  - Add missing implementation for in_atomic() (introduced in 2.5.35)
parent 277963a4
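The list above boils down to tracking the generic 2.5.29+ interrupt API on m68k. A minimal, illustrative sketch of what a caller looks like under that API follows; the function and variable names are made up for the example and are not taken from this commit:

```c
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <asm/system.h>

/* Illustrative only: the calling conventions this patch brings m68k in
 * line with.  example_shutdown() and dev_id are hypothetical. */
static void example_shutdown(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	synchronize_irq(irq);			/* now takes the irq number */

	if (irqs_disabled() || in_atomic())	/* both usable on m68k now */
		printk(KERN_WARNING "example_shutdown: atomic context?\n");
}
```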
@@ -222,7 +222,7 @@ ENTRY(nmi_handler)
inthandler:
SAVE_ALL_INT
GET_CURRENT(%d0)
addql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
| put exception # in d0
bfextu %sp@(PT_VECTOR){#4,#10},%d0
@@ -241,7 +241,7 @@ inthandler:
3: addql #8,%sp | pop parameters off stack
ret_from_interrupt:
subql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
jeq 1f
2:
RESTORE_ALL
......
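In C terms, the addqb/subqb pair above replaces the old irq_stat.__local_irq_count bookkeeping with irq_enter()/irq_exit()-style accounting in the per-thread preempt count (the offsets come from the new hardirq.h further down). A rough model, illustrative only and not part of the patch:

```c
#include <linux/sched.h>
#include <asm/hardirq.h>

/* Rough C model of what the assembler now does on interrupt entry/exit;
 * entry.S adjusts one byte of the count in place, and the exit path also
 * decides whether softirq/reschedule work is due. */
static inline void example_int_enter(void)
{
	preempt_count() += HARDIRQ_OFFSET;	/* like the addqb in inthandler */
}

static inline void example_int_exit(void)
{
	preempt_count() -= HARDIRQ_OFFSET;	/* like the subqb in ret_from_interrupt */
}
```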
@@ -71,8 +71,6 @@ int main(void)
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
DEFINE(CPUSTAT_LOCAL_IRQ_COUNT, offsetof(irq_cpustat_t, __local_irq_count));
DEFINE(CPUSTAT_LOCAL_BH_COUNT, offsetof(irq_cpustat_t, __local_bh_count));
DEFINE(CPUSTAT_SYSCALL_COUNT, offsetof(irq_cpustat_t, __syscall_count));
/* offsets into the bi_record struct */
......
@@ -7,24 +7,87 @@
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define in_interrupt() (local_irq_count(smp_processor_id()) + local_bh_count(smp_processor_id()) != 0)
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-23 are the hardirq count (max # of hardirqs: 256)
*
* - ( bit 26 is the PREEMPT_ACTIVE flag. )
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x00ff0000
*/
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 8
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define irq_enter(cpu) (local_irq_count(cpu)++)
#define irq_exit(cpu) (local_irq_count(cpu)--)
#define __MASK(x) ((1UL << (x))-1)
#define synchronize_irq() barrier()
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# define in_atomic() (preempt_count() != kernel_locked())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#define synchronize_irq(irq) barrier()
#endif
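A worked decomposition may help when reading the masks above. This sketch is not from the patch; it builds a count with one hardirq, one softirq, and a preempt depth of two, then shows what the new predicates test:

```c
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/hardirq.h>

/* Illustrative only: decoding a preempt count of 0x00010102 under the
 * layout documented above (hardirq bits 16-23, softirq 8-15, preempt 0-7). */
static void example_decode(void)
{
	unsigned long count = HARDIRQ_OFFSET + SOFTIRQ_OFFSET + 2 * PREEMPT_OFFSET;

	int hard = (count & HARDIRQ_MASK) != 0;	/* what in_irq() checks -> 1 */
	int soft = (count & SOFTIRQ_MASK) != 0;	/* what in_softirq() checks -> 1 */
	int any  = (count & (HARDIRQ_MASK | SOFTIRQ_MASK)) != 0;	/* in_interrupt() -> 1 */

	printk("count=%08lx hard=%d soft=%d any=%d\n", count, hard, soft, any);
}
```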
@@ -6,14 +6,19 @@
*/
#include <asm/atomic.h>
#include <asm/hardirq.h>
#define cpu_bh_disable(cpu) do { local_bh_count(cpu)++; barrier(); } while (0)
#define cpu_bh_enable(cpu) do { barrier(); local_bh_count(cpu)--; } while (0)
#define local_bh_disable() \
do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() \
do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
#define __local_bh_enable() local_bh_enable()
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#define local_bh_enable() \
do { \
__local_bh_enable(); \
if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
do_softirq(); \
preempt_check_resched(); \
} while (0)
#endif
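The new local_bh_disable()/local_bh_enable() nest by moving the softirq byte of the preempt count, and enabling may immediately run any pending softirqs. A small usage sketch, not from this patch (example_stat is a hypothetical counter shared with softirq context):

```c
#include <linux/interrupt.h>

/* Illustrative only: protecting data that a tasklet/softirq also touches. */
static unsigned long example_stat;

static void example_bump_stat(void)
{
	local_bh_disable();	/* preempt_count() += SOFTIRQ_OFFSET */
	example_stat++;		/* softirqs cannot run on this CPU here */
	local_bh_enable();	/* may run pending softirqs, then check resched */
}
```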
@@ -7,11 +7,6 @@
#include <asm/segment.h>
#include <asm/entry.h>
#define prepare_arch_schedule(prev) do { } while(0)
#define finish_arch_schedule(prev) do { } while(0)
#define prepare_arch_switch(rq) do { } while(0)
#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
@@ -52,24 +47,25 @@ asmlinkage void resume(void);
#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#else
#include <asm/hardirq.h>
#define local_irq_enable() ({ \
if (MACH_IS_Q40 || !local_irq_count(smp_processor_id())) \
asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
#define local_irq_enable() ({ \
if (MACH_IS_Q40 || !hardirq_count()) \
asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
})
#endif
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
static inline int irqs_disabled(void)
{
unsigned long flags;
local_save_flags(flags);
return flags & ~ALLOWINT;
}
/* For spinlocks etc */
#define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); })
#define cli() local_irq_disable()
#define sti() local_irq_enable()
#define save_flags(x) local_save_flags(x)
#define restore_flags(x) local_irq_restore(x)
#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
/*
* Force strict CPU ordering.
* Not really required on m68k...
@@ -95,33 +91,29 @@ struct __xchg_dummy { unsigned long a[100]; };
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
unsigned long tmp, flags;
save_flags(flags);
cli();
switch (size) {
case 1:
__asm__ __volatile__
("moveb %2,%0\n\t"
"moveb %1,%2"
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 2:
__asm__ __volatile__
("movew %2,%0\n\t"
"movew %1,%2"
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 4:
__asm__ __volatile__
("movel %2,%0\n\t"
"movel %1,%2"
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
restore_flags(flags);
return tmp;
unsigned long flags, tmp;
local_irq_save(flags);
switch (size) {
case 1:
tmp = *(u8 *)ptr;
*(u8 *)ptr = x;
break;
case 2:
tmp = *(u16 *)ptr;
*(u16 *)ptr = x;
break;
case 4:
tmp = *(u32 *)ptr;
*(u32 *)ptr = x;
break;
default:
BUG();
}
local_irq_restore(flags);
return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
......
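Callers reach the rewritten __xchg() through the usual xchg() macro; on !CONFIG_RMW_INSNS configurations it is now a plain load/store made atomic by masking interrupts, as shown above. A usage sketch (example_flag and example_try_claim are hypothetical):

```c
#include <asm/system.h>

/* Illustrative only: xchg() picks the access size from *ptr and forwards
 * to __xchg(); the previous value is returned, so it can act as a simple
 * test-and-set. */
static volatile int example_flag;

static int example_try_claim(void)
{
	return xchg(&example_flag, 1) == 0;	/* nonzero if we set it first */
}
```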