Commit 0d7012a9 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] genirq: cleanup: turn ARCH_HAS_IRQ_PER_CPU into CONFIG_IRQ_PER_CPU

Cleanup: change ARCH_HAS_IRQ_PER_CPU into a Kconfig method.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent cd916d31
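
For context, a minimal sketch (not part of this patch) of how generic code can test the per-CPU property after the conversion: the symbol now comes from the architecture's Kconfig as CONFIG_IRQ_PER_CPU rather than from an ARCH_HAS_IRQ_PER_CPU define in <asm/irq.h>, and the CHECK_IRQ_PER_CPU() helper in <linux/irq.h> falls back to 0 when the option is off. The helper function below is hypothetical and for illustration only.

	#include <linux/irq.h>	/* IRQ_PER_CPU, CHECK_IRQ_PER_CPU, irq_desc[] */

	/*
	 * Hypothetical example, not from the patch: non-zero when the
	 * descriptor is marked per-CPU.  On architectures whose Kconfig
	 * does not set CONFIG_IRQ_PER_CPU, CHECK_IRQ_PER_CPU() evaluates
	 * to 0, so the call site needs no arch-specific #ifdef.
	 */
	static inline int irq_is_per_cpu(unsigned int irq)
	{
		return CHECK_IRQ_PER_CPU(irq_desc[irq].status);
	}
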
@@ -28,6 +28,10 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
+config IRQ_PER_CPU
+	bool
+	default y
+
 config CRIS
 	bool
 	default y
@@ -492,6 +492,10 @@ config GENERIC_PENDING_IRQ
 	depends on GENERIC_HARDIRQS && SMP
 	default y
 
+config IRQ_PER_CPU
+	bool
+	default y
+
 source "arch/ia64/hp/sim/Kconfig"
 
 menu "Instrumentation Support"
@@ -1618,6 +1618,11 @@ config GENERIC_IRQ_PROBE
 	bool
 	default y
 
+config IRQ_PER_CPU
+	depends on SMP
+	bool
+	default y
+
 #
 # - Highmem only makes sense for the 32-bit kernel.
 # - The current highmem code will only work properly on physically indexed
@@ -51,6 +51,10 @@ config GENERIC_HARDIRQS
 config GENERIC_IRQ_PROBE
 	def_bool y
 
+config IRQ_PER_CPU
+	bool
+	default y
+
 # unless you want to implement ACPI on PA-RISC ... ;-)
 config PM
 	bool
@@ -30,6 +30,10 @@ config GENERIC_HARDIRQS
 	bool
 	default y
 
+config IRQ_PER_CPU
+	bool
+	default y
+
 config RWSEM_GENERIC_SPINLOCK
 	bool

 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
 #include <asm/arch/irq.h>
 
 static inline int irq_canonicalize(int irq)
@@ -14,11 +14,6 @@
 #define NR_IRQS		256
 #define NR_IRQ_VECTORS	NR_IRQS
 
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
 static __inline__ int
 irq_canonicalize (int irq)
 {
@@ -4,10 +4,4 @@
 #define NR_IRQS	256
 
-#ifdef CONFIG_SMP
-#define ARCH_HAS_IRQ_PER_CPU
-#endif
-
 #endif /* __ASM_MACH_MIPS_IRQ_H */
@@ -26,11 +26,6 @@
 #define NR_IRQS		(CPU_IRQ_MAX + 1)
 
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
 static __inline__ int irq_canonicalize(int irq)
 {
 	return (irq == 2) ? 9 : irq;
@@ -30,11 +30,6 @@
 #define IRQ_POLARITY_POSITIVE	0x2	/* high level or low->high edge */
 #define IRQ_POLARITY_NEGATIVE	0x0	/* low level or high->low edge */
 
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
 #define get_irq_desc(irq) (&irq_desc[(irq)])
 
 /* Define a way to iterate across irqs. */
@@ -33,7 +33,7 @@
 #define IRQ_WAITING	32	/* IRQ not yet seen - for autodetection */
 #define IRQ_LEVEL	64	/* IRQ level triggered */
 #define IRQ_MASKED	128	/* IRQ masked - shouldn't be seen again */
-#ifdef ARCH_HAS_IRQ_PER_CPU
+#ifdef CONFIG_IRQ_PER_CPU
 # define IRQ_PER_CPU	256	/* IRQ is per CPU */
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
 #else
@@ -198,7 +198,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		if (!(old->flags & new->flags & SA_SHIRQ))
 			goto mismatch;
 
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
 		/* All handlers must agree on per-cpuness */
 		if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
 			goto mismatch;
@@ -213,7 +213,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	}
 
 	*p = new;
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
 	if (new->flags & SA_PERCPU_IRQ)
 		desc->status |= IRQ_PER_CPU;
 #endif