Commit 5f6fb454 authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (116 commits)
  x86: Enable forced interrupt threading support
  x86: Mark low level interrupts IRQF_NO_THREAD
  x86: Use generic show_interrupts
  x86: ioapic: Avoid redundant lookup of irq_cfg
  x86: ioapic: Use new move_irq functions
  x86: Use the proper accessors in fixup_irqs()
  x86: ioapic: Use irq_data->state
  x86: ioapic: Simplify irq chip and handler setup
  x86: Cleanup the genirq name space
  genirq: Add chip flag to force mask on suspend
  genirq: Add desc->irq_data accessor
  genirq: Add comments to Kconfig switches
  genirq: Fixup fasteoi handler for oneshot mode
  genirq: Provide forced interrupt threading
  sched: Switch wait_task_inactive to schedule_hrtimeout()
  genirq: Add IRQF_NO_THREAD
  genirq: Allow shared oneshot interrupts
  genirq: Prepare the handling of shared oneshot interrupts
  genirq: Make warning in handle_percpu_event useful
  x86: ioapic: Move trigger defines to io_apic.h
  ...

Fix up trivial(?) conflicts in arch/x86/pci/xen.c due to genirq name
space changes clashing with the Xen cleanups.  The set_irq_msi() had
moved to xen_bind_pirq_msi_to_irq().
parents 3904afb4 c0185808
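
The common thread through the x86 and driver hunks below is the genirq name space cleanup: the old set_irq_*/get_irq_* helpers become a uniform irq_set_*/irq_get_* scheme, with the old names kept as deprecated compat wrappers for one cycle. A before/after sketch of a typical call site, using only renames visible in this merge; irq, dev, msidesc and my_chip are hypothetical driver variables:

	/* old names, now deprecated compat wrappers */
	set_irq_chip(irq, &my_chip);
	set_irq_data(irq, dev);
	ret = set_irq_msi(irq, msidesc);

	/* new genirq names, as used by the converted call sites below */
	irq_set_chip(irq, &my_chip);
	irq_set_handler_data(irq, dev);
	ret = irq_set_msi_desc(irq, msidesc);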
......@@ -2444,6 +2444,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
<deci-seconds>: poll at this frequency
0: no polling (default)
threadirqs [KNL]
Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD.
topology= [S390]
Format: {off | on}
Specify if the kernel should make use of the cpu
......
......@@ -68,6 +68,8 @@ config X86
select GENERIC_FIND_NEXT_BIT
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP
config INSTRUCTION_DECODER
......@@ -813,7 +815,7 @@ config X86_LOCAL_APIC
config X86_IO_APIC
def_bool y
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
config X86_VISWS_APIC
def_bool y
......
......@@ -426,4 +426,16 @@ struct local_apic {
#else
#define BAD_APICID 0xFFFFu
#endif
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
#endif /* _ASM_X86_APICDEF_H */
......@@ -63,17 +63,6 @@ union IO_APIC_reg_03 {
} __attribute__ ((packed)) bits;
};
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
......@@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry {
index : 15;
} __attribute__ ((packed));
#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
#ifdef CONFIG_X86_IO_APIC
/*
......@@ -150,11 +143,6 @@ extern int timer_through_8259;
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
extern u8 io_apic_unique_id(u8 id);
extern int io_apic_get_unique_id(int ioapic, int apic_id);
extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
......@@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi);
extern void ioapic_and_gsi_init(void);
extern void ioapic_insert_resources(void);
int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr);
extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
......@@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void);
extern void mp_save_irq(struct mpc_intsrc *m);
extern void disable_ioapic_support(void);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
......@@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr) { return 0; }
static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
return NULL;
}
static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { }
static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent)
{
return -ENOMEM;
}
static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { }
static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent)
{
return -ENOMEM;
}
static inline void mp_save_irq(struct mpc_intsrc *m) { }
static inline void disable_ioapic_support(void) { }
#endif
#endif /* _ASM_X86_IO_APIC_H */
......@@ -43,6 +43,7 @@
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/idle.h>
......@@ -1209,7 +1210,7 @@ void __cpuinit setup_local_APIC(void)
rdtscll(tsc);
if (disable_apic) {
arch_disable_smp_support();
disable_ioapic_support();
return;
}
......@@ -1448,7 +1449,7 @@ int __init enable_IR(void)
void __init enable_IR_x2apic(void)
{
unsigned long flags;
struct IO_APIC_route_entry **ioapic_entries = NULL;
struct IO_APIC_route_entry **ioapic_entries;
int ret, x2apic_enabled = 0;
int dmar_table_init_ret;
......
......
......@@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
if (!irq)
return -EINVAL;
set_irq_data(irq, dev);
irq_set_handler_data(irq, dev);
if (hpet_setup_msi_irq(irq))
return -EINVAL;
......
......@@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
i8259A_chip.name);
enable_irq(irq);
}
......
......@@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq)
#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
* /proc/interrupts printing:
* /proc/interrupts printing for arch-specific interrupts
*/
static int show_other_interrupts(struct seq_file *p, int prec)
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
......@@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j, prec;
struct irqaction *action;
struct irq_desc *desc;
if (i > nr_irqs)
return 0;
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
j *= 10;
if (i == nr_irqs)
return show_other_interrupts(p, prec);
/* print header */
if (i == 0) {
seq_printf(p, "%*s", prec + 8, "");
for_each_online_cpu(j)
seq_printf(p, "CPU%-8d", j);
seq_putc(p, '\n');
}
desc = irq_to_desc(i);
if (!desc)
return 0;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
if (!action && !any_count)
goto out;
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
seq_printf(p, " %8s", desc->irq_data.chip->name);
seq_printf(p, "-%-8s", desc->name);
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
/*
* /proc/stat helpers
*/
......@@ -293,6 +240,7 @@ void fixup_irqs(void)
static int warned;
struct irq_desc *desc;
struct irq_data *data;
struct irq_chip *chip;
for_each_irq_desc(irq, desc) {
int break_affinity = 0;
......@@ -307,10 +255,10 @@ void fixup_irqs(void)
/* interrupts are disabled at this point */
raw_spin_lock(&desc->lock);
data = &desc->irq_data;
data = irq_desc_get_irq_data(desc);
affinity = data->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
cpumask_subset(affinity, cpu_online_mask)) {
raw_spin_unlock(&desc->lock);
continue;
}
......@@ -327,16 +275,17 @@ void fixup_irqs(void)
affinity = cpu_all_mask;
}
if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
data->chip->irq_mask(data);
chip = irq_data_get_irq_chip(data);
if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
chip->irq_mask(data);
if (data->chip->irq_set_affinity)
data->chip->irq_set_affinity(data, affinity, true);
if (chip->irq_set_affinity)
chip->irq_set_affinity(data, affinity, true);
else if (!(warned++))
set_affinity = 0;
if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
data->chip->irq_unmask(data);
if (!irqd_can_move_in_process_context(data) && chip->irq_unmask)
chip->irq_unmask(data);
raw_spin_unlock(&desc->lock);
......@@ -368,10 +317,11 @@ void fixup_irqs(void)
irq = __this_cpu_read(vector_irq[vector]);
desc = irq_to_desc(irq);
data = &desc->irq_data;
data = irq_desc_get_irq_data(desc);
chip = irq_data_get_irq_chip(data);
raw_spin_lock(&desc->lock);
if (data->chip->irq_retrigger)
data->chip->irq_retrigger(data);
if (chip->irq_retrigger)
chip->irq_retrigger(data);
raw_spin_unlock(&desc->lock);
}
}
......
......@@ -71,6 +71,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
static struct irqaction fpu_irq = {
.handler = math_error_irq,
.name = "fpu",
.flags = IRQF_NO_THREAD,
};
#endif
......@@ -80,6 +81,7 @@ static struct irqaction fpu_irq = {
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
......@@ -110,7 +112,7 @@ void __init init_ISA_irqs(void)
legacy_pic->init(0);
for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
set_irq_chip_and_handler_name(i, chip, handle_level_irq, name);
irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
}
void __init init_IRQ(void)
......
......@@ -64,6 +64,7 @@
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
......@@ -927,6 +928,14 @@ int __cpuinit native_cpu_up(unsigned int cpu)
return 0;
}
/**
* arch_disable_smp_support() - disables SMP support for x86 at runtime
*/
void arch_disable_smp_support(void)
{
disable_ioapic_support();
}
/*
* Fall back to non SMP mode after errors.
*
......@@ -1027,7 +1036,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
"(tell your hw vendor)\n");
}
smpboot_clear_io_apic();
arch_disable_smp_support();
disable_ioapic_support();
return -1;
}
......
......@@ -847,7 +847,7 @@ static void __init lguest_init_IRQ(void)
void lguest_setup_irq(unsigned int irq)
{
irq_alloc_desc_at(irq, 0);
set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
handle_level_irq, "level");
}
......@@ -995,7 +995,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
static void lguest_time_init(void)
{
/* Set up the timer interrupt (0) to go to our simple timer routine */
set_irq_handler(0, lguest_time_irq);
irq_set_handler(0, lguest_time_irq);
clocksource_register(&lguest_clock);
......
......@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_cfg *cfg = get_irq_chip_data(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode, err;
......@@ -148,7 +148,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
else
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
irq_name);
mmr_value = 0;
......
......@@ -569,11 +569,13 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
static struct irqaction master_action = {
.handler = piix4_master_intr,
.name = "PIIX4-8259",
.flags = IRQF_NO_THREAD,
};
static struct irqaction cascade_action = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static inline void set_piix4_virtual_irq_type(void)
......@@ -606,7 +608,7 @@ static void __init visws_pre_intr_init(void)
chip = &cobalt_irq_type;
if (chip)
set_irq_chip(i, chip);
irq_set_chip(i, chip);
}
setup_irq(CO_IRQ_8259, &master_action);
......
......@@ -674,7 +674,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
pirq_to_irq[pirq] = irq;
ret = set_irq_msi(irq, msidesc);
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
out:
......
......@@ -14,6 +14,8 @@
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
......@@ -56,6 +58,7 @@
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
......@@ -68,22 +71,9 @@
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
/*
* Bits used by threaded handlers:
* IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
* IRQTF_DIED - handler thread died
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
*/
enum {
IRQTF_RUNTHREAD,
IRQTF_DIED,
IRQTF_WARNED,
IRQTF_AFFINITY,
};
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
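
IRQF_NO_THREAD pairs with the new "threadirqs" boot option and IRQ_FORCED_THREADING: when forced threading is active, every primary handler is pushed into a kernel thread unless its irqaction carries this flag (timers get it implicitly via the IRQF_TIMER define above). A hedged sketch of a driver opting out; my_cascade_isr, my_setup and "my-cascade" are hypothetical names:

	#include <linux/interrupt.h>

	/* Handler that must stay in hard interrupt context, like the
	 * cascade/FPU irqactions converted elsewhere in this merge. */
	static irqreturn_t my_cascade_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int my_setup(unsigned int irq, void *dev)
	{
		/* IRQF_NO_THREAD: never force-thread this handler, even
		 * when the kernel is booted with "threadirqs". */
		return request_irq(irq, my_cascade_isr, IRQF_NO_THREAD,
				   "my-cascade", dev);
	}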
/*
* These values can be returned by request_any_context_irq() and
......@@ -111,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @thread_fn: interrupt handler function for threaded interrupts
* @thread: thread pointer for threaded interrupts
* @thread_flags: flags related to @thread
* @thread_mask: bitmask for keeping track of @thread activity
*/
struct irqaction {
irq_handler_t handler;
......@@ -121,6 +112,7 @@ struct irqaction {
irq_handler_t thread_fn;
struct task_struct *thread;
unsigned long thread_flags;
unsigned long thread_mask;
const char *name;
struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
......@@ -241,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
* @irq: Interrupt to which notification applies
* @kref: Reference count, for internal use
* @work: Work item, for internal use
* @notify: Function to be called on change. This will be
* called in process context.
* @release: Function to be called on release. This will be
* called in process context. Once registered, the
* structure must only be freed when this function is
* called or later.
*/
struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
static inline void irq_run_affinity_notifiers(void)
{
flush_scheduled_work();
}
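
A sketch of how a driver might hook the new affinity notification; all my_* names are hypothetical. Per the kerneldoc above, @irq, @kref and @work are for internal use and both callbacks run in process context:

	static void my_notify(struct irq_affinity_notify *notify,
			      const cpumask_t *mask)
	{
		/* Re-steer per-cpu driver state to the new mask here. */
	}

	static void my_release(struct kref *ref)
	{
		/* Last reference dropped: the structure may be freed now. */
	}

	static struct irq_affinity_notify my_affinity_notify = {
		.notify	 = my_notify,
		.release = my_release,
	};

	static int my_watch_affinity(unsigned int irq)
	{
		return irq_set_affinity_notifier(irq, &my_affinity_notify);
	}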
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
......@@ -315,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
}
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
/* Please do not use: Use the replacement functions instead */
static inline int set_irq_wake(unsigned int irq, unsigned int on)
{
return irq_set_irq_wake(irq, on);
}
#endif
static inline int enable_irq_wake(unsigned int irq)
{
return set_irq_wake(irq, 1);
return irq_set_irq_wake(irq, 1);
}
static inline int disable_irq_wake(unsigned int irq)
{
return set_irq_wake(irq, 0);
return irq_set_irq_wake(irq, 0);
}
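
Typical driver-side use of the wake control goes through the unchanged enable_irq_wake()/disable_irq_wake() wrappers; a sketch from a hypothetical suspend/resume pair (struct my_priv and the my_* names are placeholders):

	static int my_suspend(struct device *dev)
	{
		struct my_priv *priv = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			enable_irq_wake(priv->irq);	/* irq_set_irq_wake(irq, 1) */
		return 0;
	}

	static int my_resume(struct device *dev)
	{
		struct my_priv *priv = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			disable_irq_wake(priv->irq);	/* irq_set_irq_wake(irq, 0) */
		return 0;
	}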
#else /* !CONFIG_GENERIC_HARDIRQS */
......@@ -354,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
}
#endif /* CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads (0)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
......@@ -653,6 +689,7 @@ static inline void init_irq_proc(void)
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
......
......
......@@ -8,6 +8,7 @@
* For now it's included from <linux/irq.h>
*/
struct irq_affinity_notify;
struct proc_dir_entry;
struct timer_rand_state;
/**
......@@ -18,13 +19,16 @@ struct timer_rand_state;
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @action: the irq action chain
* @status: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
* @threads_oneshot: bitfield to handle shared oneshot threads
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
* @dir: /proc/irq/ procfs entry
......@@ -45,6 +49,7 @@ struct irq_desc {
struct {
unsigned int irq;
unsigned int node;
unsigned int pad_do_not_even_think_about_it;
struct irq_chip *chip;
void *handler_data;
void *chip_data;
......@@ -59,9 +64,16 @@ struct irq_desc {
struct timer_rand_state *timer_rand_state;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
irq_preflow_handler_t preflow_handler;
#endif
struct irqaction *action; /* IRQ action list */
#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
unsigned int status_use_accessors;
#else
unsigned int status; /* IRQ status */
#endif
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
......@@ -70,10 +82,12 @@ struct irq_desc {
raw_spinlock_t lock;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
#endif
#endif
unsigned long threads_oneshot;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PROC_FS
......@@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
#ifdef CONFIG_GENERIC_HARDIRQS
#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
}
static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
return desc->irq_data.chip;
}
static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
return desc->irq_data.chip_data;
}
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
return desc->irq_data.handler_data;
}
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
return desc->irq_data.msi_desc;
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
{
return irq_desc_get_chip(desc);
}
static inline void *get_irq_desc_data(struct irq_desc *desc)
{
return irq_desc_get_handler_data(desc);
}
static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
{
return irq_desc_get_chip_data(desc);
}
static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
{
return irq_desc_get_msi_desc(desc);
}
#endif
/*
* Architectures call this to let the generic IRQ layer
......@@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq)
return desc->action != NULL;
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
static inline int irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;
......@@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
desc = irq_to_desc(irq);
return desc->status & IRQ_NO_BALANCING_MASK;
}
#endif
/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
......@@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq,
desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void
__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
desc->preflow_handler = handler;
}
#endif
#endif
#endif
# Select this to activate the generic irq options below
config HAVE_GENERIC_HARDIRQS
def_bool n
bool
if HAVE_GENERIC_HARDIRQS
menu "IRQ subsystem"
......@@ -11,26 +12,44 @@ config GENERIC_HARDIRQS
# Select this to disable the deprecated stuff
config GENERIC_HARDIRQS_NO_DEPRECATED
def_bool n
bool
config GENERIC_HARDIRQS_NO_COMPAT
bool
# Options selectable by the architecture code
# Make sparse irq Kconfig switch below available
config HAVE_SPARSE_IRQ
def_bool n
bool
# Enable the generic irq autoprobe mechanism
config GENERIC_IRQ_PROBE
def_bool n
bool
# Use the generic /proc/interrupts implementation
config GENERIC_IRQ_SHOW
bool
# Support for delayed migration from interrupt context
config GENERIC_PENDING_IRQ
def_bool n
bool
# Alpha specific irq affinity mechanism
config AUTO_IRQ_AFFINITY
def_bool n
config IRQ_PER_CPU
def_bool n
bool
# Tasklet based software resend for pending interrupts on enable_irq()
config HARDIRQS_SW_RESEND
def_bool n
bool
# Preflow handler support for fasteoi (sparc64)
config IRQ_PREFLOW_FASTEOI
bool
# Support forced irq threading
config IRQ_FORCED_THREADING
bool
config SPARSE_IRQ
bool "Support sparse irq numbering"
......
......@@ -17,7 +17,7 @@
/*
* Autodetection depends on the fact that any interrupt that
* comes in on to an unassigned handler will get stuck with
* "IRQ_WAITING" cleared and the interrupt disabled.
* "IRQS_WAITING" cleared and the interrupt disabled.
*/
static DEFINE_MUTEX(probing_active);
......@@ -32,7 +32,6 @@ unsigned long probe_irq_on(void)
{
struct irq_desc *desc;
unsigned long mask = 0;
unsigned int status;
int i;
/*
......@@ -46,13 +45,7 @@ unsigned long probe_irq_on(void)
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
* An old-style architecture might still have
* the handle_bad_irq handler there:
*/
compat_irq_chip_set_default_handler(desc);
if (!desc->action && irq_settings_can_probe(desc)) {
/*
* Some chips need to know about probing in
* progress:
......@@ -60,7 +53,7 @@ unsigned long probe_irq_on(void)
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
desc->irq_data.chip->irq_startup(&desc->irq_data);
irq_startup(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
......@@ -75,10 +68,12 @@ unsigned long probe_irq_on(void)
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->irq_data.chip->irq_startup(&desc->irq_data))
desc->status |= IRQ_PENDING;
if (!desc->action && irq_settings_can_probe(desc)) {
desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
if (irq_startup(desc)) {
irq_compat_set_pending(desc);
desc->istate |= IRQS_PENDING;
}
}
raw_spin_unlock_irq(&desc->lock);
}
......@@ -93,13 +88,12 @@ unsigned long probe_irq_on(void)
*/
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
if (desc->istate & IRQS_AUTODETECT) {
/* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT;
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
if (!(desc->istate & IRQS_WAITING)) {
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown(desc);
} else
if (i < 32)
mask |= 1 << i;
......@@ -125,20 +119,18 @@ EXPORT_SYMBOL(probe_irq_on);
*/
unsigned int probe_irq_mask(unsigned long val)
{
unsigned int status, mask = 0;
unsigned int mask = 0;
struct irq_desc *desc;
int i;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
if (i < 16 && !(status & IRQ_WAITING))
if (desc->istate & IRQS_AUTODETECT) {
if (i < 16 && !(desc->istate & IRQS_WAITING))
mask |= 1 << i;
desc->status = status & ~IRQ_AUTODETECT;
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
......@@ -169,20 +161,18 @@ int probe_irq_off(unsigned long val)
{
int i, irq_found = 0, nr_of_irqs = 0;
struct irq_desc *desc;
unsigned int status;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
if (!(status & IRQ_WAITING)) {
if (desc->istate & IRQS_AUTODETECT) {
if (!(desc->istate & IRQS_WAITING)) {
if (!nr_of_irqs)
irq_found = i;
nr_of_irqs++;
}
desc->status = status & ~IRQ_AUTODETECT;
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
......
......
/*
* Compat layer for transition period
*/
#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
static inline void irq_compat_set_progress(struct irq_desc *desc)
{
desc->status |= IRQ_INPROGRESS;
}
static inline void irq_compat_clr_progress(struct irq_desc *desc)
{
desc->status &= ~IRQ_INPROGRESS;
}
static inline void irq_compat_set_disabled(struct irq_desc *desc)
{
desc->status |= IRQ_DISABLED;
}
static inline void irq_compat_clr_disabled(struct irq_desc *desc)
{
desc->status &= ~IRQ_DISABLED;
}
static inline void irq_compat_set_pending(struct irq_desc *desc)
{
desc->status |= IRQ_PENDING;
}
static inline void irq_compat_clr_pending(struct irq_desc *desc)
{
desc->status &= ~IRQ_PENDING;
}
static inline void irq_compat_set_masked(struct irq_desc *desc)
{
desc->status |= IRQ_MASKED;
}
static inline void irq_compat_clr_masked(struct irq_desc *desc)
{
desc->status &= ~IRQ_MASKED;
}
static inline void irq_compat_set_move_pending(struct irq_desc *desc)
{
desc->status |= IRQ_MOVE_PENDING;
}
static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
{
desc->status &= ~IRQ_MOVE_PENDING;
}
static inline void irq_compat_set_affinity(struct irq_desc *desc)
{
desc->status |= IRQ_AFFINITY_SET;
}
static inline void irq_compat_clr_affinity(struct irq_desc *desc)
{
desc->status &= ~IRQ_AFFINITY_SET;
}
#else
static inline void irq_compat_set_progress(struct irq_desc *desc) { }
static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
static inline void irq_compat_set_disabled(struct irq_desc *desc) { }
static inline void irq_compat_clr_disabled(struct irq_desc *desc) { }
static inline void irq_compat_set_pending(struct irq_desc *desc) { }
static inline void irq_compat_clr_pending(struct irq_desc *desc) { }
static inline void irq_compat_set_masked(struct irq_desc *desc) { }
static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
#endif
/*
* Debugging printout:
*/
#include <linux/kallsyms.h>
#define P(f) if (desc->status & f) printk("%14s set\n", #f)
#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
printk("->action(): %p\n", desc->action);
if (desc->action) {
printk("->action->handler(): %p, ", desc->action->handler);
print_symbol("%s\n", (unsigned long)desc->action->handler);
}
P(IRQ_LEVEL);
P(IRQ_PER_CPU);
P(IRQ_NOPROBE);
P(IRQ_NOREQUEST);
P(IRQ_NOAUTOEN);
PS(IRQS_AUTODETECT);
PS(IRQS_INPROGRESS);
PS(IRQS_REPLAY);
PS(IRQS_WAITING);
PS(IRQS_DISABLED);
PS(IRQS_PENDING);
PS(IRQS_MASKED);
}
#undef P
#undef PS
......@@ -51,30 +51,92 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
"but no thread function available.", irq, action->name);
}
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
* @action: the interrupt action chain for this irq
static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
/*
* Wake up the handler thread for this action. In case the
* thread crashed and was killed we just pretend that we
* handled the interrupt. The hardirq handler has disabled the
* device interrupt, so no irq storm is lurking. If the
* RUNTHREAD bit is already set, nothing to do.
*/
if (test_bit(IRQTF_DIED, &action->thread_flags) ||
test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
return;
/*
* It's safe to OR the mask lockless here. We have only two
* places which write to threads_oneshot: This code and the
* irq thread.
*
* Handles the action chain of an irq event
* This code is the hard irq context and can never run on two
* cpus in parallel. If it ever does we have more serious
* problems than this bitmask.
*
* The irq threads of this irq which clear their "running" bit
* in threads_oneshot are serialized via desc->lock against
* each other and they are serialized against this code by
* IRQS_INPROGRESS.
*
* Hard irq handler:
*
* spin_lock(desc->lock);
* desc->state |= IRQS_INPROGRESS;
* spin_unlock(desc->lock);
* set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
* desc->threads_oneshot |= mask;
* spin_lock(desc->lock);
* desc->state &= ~IRQS_INPROGRESS;
* spin_unlock(desc->lock);
*
* irq thread:
*
* again:
* spin_lock(desc->lock);
* if (desc->state & IRQS_INPROGRESS) {
* spin_unlock(desc->lock);
* while(desc->state & IRQS_INPROGRESS)
* cpu_relax();
* goto again;
* }
* if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
* desc->threads_oneshot &= ~mask;
* spin_unlock(desc->lock);
*
* So either the thread waits for us to clear IRQS_INPROGRESS
* or we are waiting in the flow handler for desc->lock to be
* released before we reach this point. The thread also checks
* IRQTF_RUNTHREAD under desc->lock. If set it leaves
* threads_oneshot untouched and runs the thread another time.
*/
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
desc->threads_oneshot |= action->thread_mask;
wake_up_process(action->thread);
}
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
irqreturn_t retval = IRQ_NONE;
unsigned int random = 0, irq = desc->irq_data.irq;
do {
irqreturn_t res;
trace_irq_handler_entry(irq, action);
ret = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, ret);
res = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, res);
if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
irq, action->handler))
local_irq_disable();
switch (ret) {
switch (res) {
case IRQ_WAKE_THREAD:
/*
* Set result to handled so the spurious check
* does not trigger.
*/
ret = IRQ_HANDLED;
res = IRQ_HANDLED;
/*
* Catch drivers which return WAKE_THREAD but
......@@ -85,36 +147,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
break;
}
/*
* Wake up the handler thread for this
* action. In case the thread crashed and was
* killed we just pretend that we handled the
* interrupt. The hardirq handler above has
* disabled the device interrupt, so no irq
* storm is lurking.
*/
if (likely(!test_bit(IRQTF_DIED,
&action->thread_flags))) {
set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
wake_up_process(action->thread);
}
irq_wake_thread(desc, action);
/* Fall through to add to randomness */
case IRQ_HANDLED:
status |= action->flags;
random |= action->flags;
break;
default:
break;
}
retval |= ret;
retval |= res;
action = action->next;
} while (action);
if (status & IRQF_SAMPLE_RANDOM)
if (random & IRQF_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();
if (!noirqdebug)
note_interrupt(irq, desc, retval);
return retval;
}
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
struct irqaction *action = desc->action;
irqreturn_t ret;
irq_compat_clr_pending(desc);
desc->istate &= ~IRQS_PENDING;
irq_compat_set_progress(desc);
desc->istate |= IRQS_INPROGRESS;
raw_spin_unlock(&desc->lock);
ret = handle_irq_event_percpu(desc, action);
raw_spin_lock(&desc->lock);
desc->istate &= ~IRQS_INPROGRESS;
irq_compat_clr_progress(desc);
return ret;
}
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
* @action: the interrupt action chain for this irq
*
* Handles the action chain of an irq event
*/
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
return handle_irq_event_percpu(irq_to_desc(irq), action);
}
/*
* IRQ subsystem internal functions and variables:
*
* Do not ever include this file from anything else than
* kernel/irq/. Do not even think about using any information outside
* of this file for your non core code.
*/
#include <linux/irqdesc.h>
......@@ -9,25 +13,89 @@
# define IRQ_BITMAP_BITS NR_IRQS
#endif
#define istate core_internal_state__do_not_mess_with_it
#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
# define status status_use_accessors
#endif
extern int noirqdebug;
/*
* Bits used by threaded handlers:
* IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
* IRQTF_DIED - handler thread died
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
* IRQTF_FORCED_THREAD - irq action is force threaded
*/
enum {
IRQTF_RUNTHREAD,
IRQTF_DIED,
IRQTF_WARNED,
IRQTF_AFFINITY,
IRQTF_FORCED_THREAD,
};
/*
* Bit masks for desc->state
*
* IRQS_AUTODETECT - autodetection in progress
* IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
* detection
* IRQS_POLL_INPROGRESS - polling in progress
* IRQS_INPROGRESS - Interrupt in progress
* IRQS_ONESHOT - irq is not unmasked in primary handler
* IRQS_REPLAY - irq is replayed
* IRQS_WAITING - irq is waiting
* IRQS_DISABLED - irq is disabled
* IRQS_PENDING - irq is pending and replayed later
* IRQS_MASKED - irq is masked
* IRQS_SUSPENDED - irq is suspended
*/
enum {
IRQS_AUTODETECT = 0x00000001,
IRQS_SPURIOUS_DISABLED = 0x00000002,
IRQS_POLL_INPROGRESS = 0x00000008,
IRQS_INPROGRESS = 0x00000010,
IRQS_ONESHOT = 0x00000020,
IRQS_REPLAY = 0x00000040,
IRQS_WAITING = 0x00000080,
IRQS_DISABLED = 0x00000100,
IRQS_PENDING = 0x00000200,
IRQS_MASKED = 0x00000400,
IRQS_SUSPENDED = 0x00000800,
};
#include "compat.h"
#include "debug.h"
#include "settings.h"
#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);
/* Set default handler: */
extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern int irq_startup(struct irq_desc *desc);
extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
irqreturn_t handle_irq_event(struct irq_desc *desc);
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
bool irq_wait_for_poll(struct irq_desc *desc);
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
......@@ -43,20 +111,10 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
extern int irq_select_affinity_usr(unsigned int irq);
extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
static inline void irq_end(unsigned int irq, struct irq_desc *desc)
{
if (desc->irq_data.chip && desc->irq_data.chip->end)
desc->irq_data.chip->end(irq);
}
#else
static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
#endif
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
......@@ -70,43 +128,60 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
{
return __irq_get_desc_lock(irq, flags, true);
}
static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
__irq_put_desc_unlock(desc, flags, true);
}
static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags)
{
return __irq_get_desc_lock(irq, flags, false);
}
static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
__irq_put_desc_unlock(desc, flags, false);
}
/*
* Debugging printout:
* Manipulation functions for irq_data.state
*/
static inline void irqd_set_move_pending(struct irq_data *d)
{
d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
irq_compat_set_move_pending(irq_data_to_desc(d));
}
#include <linux/kallsyms.h>
#define P(f) if (desc->status & f) printk("%14s set\n", #f)
static inline void irqd_clr_move_pending(struct irq_data *d)
{
d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
irq_compat_clr_move_pending(irq_data_to_desc(d));
}
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
printk("->action(): %p\n", desc->action);
if (desc->action) {
printk("->action->handler(): %p, ", desc->action->handler);
print_symbol("%s\n", (unsigned long)desc->action->handler);
}
P(IRQ_INPROGRESS);
P(IRQ_DISABLED);
P(IRQ_PENDING);
P(IRQ_REPLAY);
P(IRQ_AUTODETECT);
P(IRQ_WAITING);
P(IRQ_LEVEL);
P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
P(IRQ_PER_CPU);
#endif
P(IRQ_NOPROBE);
P(IRQ_NOREQUEST);
P(IRQ_NOAUTOEN);
d->state_use_accessors &= ~mask;
}
#undef P
static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
d->state_use_accessors |= mask;
}
static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
return d->state_use_accessors & mask;
}
......@@ -79,7 +79,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
desc->irq_data.chip_data = NULL;
desc->irq_data.handler_data = NULL;
desc->irq_data.msi_desc = NULL;
desc->status = IRQ_DEFAULT_INIT_FLAGS;
irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
desc->istate = IRQS_DISABLED;
desc->handle_irq = handle_bad_irq;
desc->depth = 1;
desc->irq_count = 0;
......@@ -206,6 +207,14 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
return NULL;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
if (nr > IRQ_BITMAP_BITS)
return -ENOMEM;
nr_irqs = nr;
return 0;
}
int __init early_irq_init(void)
{
int i, initcnt, node = first_online_node;
......@@ -238,7 +247,7 @@ int __init early_irq_init(void)
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DEFAULT_INIT_FLAGS,
.istate = IRQS_DISABLED,
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
......@@ -260,8 +269,8 @@ int __init early_irq_init(void)
for (i = 0; i < count; i++) {
desc[i].irq_data.irq = i;
desc[i].irq_data.chip = &no_irq_chip;
/* TODO : do this allocation on-demand ... */
desc[i].kstat_irqs = alloc_percpu(unsigned int);
irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
alloc_masks(desc + i, GFP_KERNEL, node);
desc_smp_init(desc + i, node);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
......@@ -286,24 +295,14 @@ static void free_desc(unsigned int irq)
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
struct irq_desc *desc;
unsigned int i;
for (i = 0; i < cnt; i++) {
desc = irq_to_desc(start + i);
if (desc && !desc->kstat_irqs) {
unsigned int __percpu *stats = alloc_percpu(unsigned int);
if (!stats)
return -1;
if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
free_percpu(stats);
}
}
#endif
return start;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
return -ENOMEM;
}
#endif /* !CONFIG_SPARSE_IRQ */
/* Dynamic interrupt handling */
......@@ -347,14 +346,17 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
mutex_lock(&sparse_irq_lock);
start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
from, cnt, 0);
ret = -EEXIST;
if (irq >= 0 && start != irq)
goto err;
ret = -ENOMEM;
if (start >= nr_irqs)
if (start + cnt > nr_irqs) {
ret = irq_expand_nr_irqs(start + cnt);
if (ret)
goto err;
}
bitmap_set(allocated_irqs, start, cnt);
mutex_unlock(&sparse_irq_lock);
......@@ -401,6 +403,26 @@ unsigned int irq_get_next_irq(unsigned int offset)
return find_next_bit(allocated_irqs, nr_irqs, offset);
}
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
if (bus)
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, *flags);
}
return desc;
}
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (bus)
chip_bus_sync_unlock(desc);
}
/**
* dynamic_irq_cleanup - cleanup a dynamically allocated irq
* @irq: irq number to initialize
......
......
......@@ -4,23 +4,23 @@
#include "internals.h"
void move_masked_irq(int irq)
void irq_move_masked_irq(struct irq_data *idata)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_chip *chip = desc->irq_data.chip;
struct irq_desc *desc = irq_data_to_desc(idata);
struct irq_chip *chip = idata->chip;
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
return;
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
if (CHECK_IRQ_PER_CPU(desc->status)) {
if (!irqd_can_balance(&desc->irq_data)) {
WARN_ON(1);
return;
}
desc->status &= ~IRQ_MOVE_PENDING;
irqd_clr_move_pending(&desc->irq_data);
if (unlikely(cpumask_empty(desc->pending_mask)))
return;
......@@ -53,15 +53,20 @@ void move_masked_irq(int irq)
cpumask_clear(desc->pending_mask);
}
void move_native_irq(int irq)
void move_masked_irq(int irq)
{
irq_move_masked_irq(irq_get_irq_data(irq));
}
void irq_move_irq(struct irq_data *idata)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_desc *desc = irq_data_to_desc(idata);
bool masked;
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
if (likely(!irqd_is_setaffinity_pending(idata)))
return;
if (unlikely(desc->status & IRQ_DISABLED))
if (unlikely(desc->istate & IRQS_DISABLED))
return;
/*
......@@ -69,10 +74,15 @@ void move_native_irq(int irq)
* threaded interrupt with ONESHOT set, we can end up with an
* interrupt storm.
*/
masked = desc->status & IRQ_MASKED;
masked = desc->istate & IRQS_MASKED;
if (!masked)
desc->irq_data.chip->irq_mask(&desc->irq_data);
move_masked_irq(irq);
idata->chip->irq_mask(idata);
irq_move_masked_irq(idata);
if (!masked)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
idata->chip->irq_unmask(idata);
}
void move_native_irq(int irq)
{
irq_move_irq(irq_get_irq_data(irq));
}
......@@ -18,7 +18,7 @@
* During system-wide suspend or hibernation device drivers need to be prevented
* from receiving interrupts and this function is provided for this purpose.
* It marks all interrupt lines in use, except for the timer ones, as disabled
* and sets the IRQ_SUSPENDED flag for each of them.
* and sets the IRQS_SUSPENDED flag for each of them.
*/
void suspend_device_irqs(void)
{
......@@ -34,7 +34,7 @@ void suspend_device_irqs(void)
}
for_each_irq_desc(irq, desc)
if (desc->status & IRQ_SUSPENDED)
if (desc->istate & IRQS_SUSPENDED)
synchronize_irq(irq);
}
EXPORT_SYMBOL_GPL(suspend_device_irqs);
......@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs);
* resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
*
* Enable all interrupt lines previously disabled by suspend_device_irqs() that
* have the IRQ_SUSPENDED flag set.
* have the IRQS_SUSPENDED flag set.
*/
void resume_device_irqs(void)
{
......@@ -68,9 +68,24 @@ int check_wakeup_irqs(void)
struct irq_desc *desc;
int irq;
for_each_irq_desc(irq, desc)
if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
for_each_irq_desc(irq, desc) {
if (irqd_is_wakeup_set(&desc->irq_data)) {
if (desc->istate & IRQS_PENDING)
return -EBUSY;
continue;
}
/*
* Check whether the non-wakeup interrupts need to be
* masked before finally going into suspend state. This
* covers hardware that has no wakeup source configuration
* facility; the chip implementation indicates that with
* IRQCHIP_MASK_ON_SUSPEND.
*/
if (desc->istate & IRQS_SUSPENDED &&
irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
mask_irq(desc);
}
return 0;
}
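
The masking above is driven by the chip flag added in this branch ("genirq: Add chip flag to force mask on suspend"). A sketch of a chip without hardware wakeup configuration opting in; the my_chip_* callbacks are hypothetical:

	static void my_chip_mask(struct irq_data *d)
	{
		/* mask the line in hardware */
	}

	static void my_chip_unmask(struct irq_data *d)
	{
		/* unmask the line in hardware */
	}

	static struct irq_chip my_chip = {
		.name		= "my-chip",
		.irq_mask	= my_chip_mask,
		.irq_unmask	= my_chip_unmask,
		/* No wakeup source configuration in hardware: have the
		 * core mask non-wakeup irqs in check_wakeup_irqs(). */
		.flags		= IRQCHIP_MASK_ON_SUSPEND,
	};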
......@@ -11,6 +11,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include "internals.h"
......@@ -24,7 +25,7 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
const struct cpumask *mask = desc->irq_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PENDING)
if (irqd_is_setaffinity_pending(&desc->irq_data))
mask = desc->pending_mask;
#endif
seq_cpumask(m, mask);
......@@ -65,8 +66,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
cpumask_var_t new_value;
int err;
if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
irq_balancing_disabled(irq))
if (!irq_can_set_affinity(irq) || no_irq_affinity)
return -EIO;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
......@@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
if (!cpumask_intersects(new_value, cpu_online_mask)) {
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
err = irq_select_affinity_usr(irq) ? -EINVAL : count;
err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
} else {
irq_set_affinity(irq, new_value);
err = count;
......@@ -357,3 +357,65 @@ void init_irq_proc(void)
}
}
#ifdef CONFIG_GENERIC_IRQ_SHOW
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
static int prec;
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j;
struct irqaction *action;
struct irq_desc *desc;
if (i > nr_irqs)
return 0;
if (i == nr_irqs)
return arch_show_interrupts(p, prec);
/* print header and calculate the width of the first column */
if (i == 0) {
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
j *= 10;
seq_printf(p, "%*s", prec + 8, "");
for_each_online_cpu(j)
seq_printf(p, "CPU%-8d", j);
seq_putc(p, '\n');
}
desc = irq_to_desc(i);
if (!desc)
return 0;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
if (!action && !any_count)
goto out;
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
seq_printf(p, " %8s", desc->irq_data.chip->name);
seq_printf(p, "-%-8s", desc->name);
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
......@@ -55,20 +55,19 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq)
{
unsigned int status = desc->status;
/*
* Make sure the interrupt is enabled, before resending it:
*/
desc->irq_data.chip->irq_enable(&desc->irq_data);
/*
* We do not resend level type interrupts. Level type
* interrupts are resent by hardware when they are still
* active.
*/
if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
if (irq_settings_is_level(desc))
return;
if (desc->istate & IRQS_REPLAY)
return;
if (desc->istate & IRQS_PENDING) {
irq_compat_clr_pending(desc);
desc->istate &= ~IRQS_PENDING;
desc->istate |= IRQS_REPLAY;
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
......
/*
* Internal header to deal with irq_desc->status which will be renamed
* to irq_desc->settings.
*/
enum {
_IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
_IRQ_PER_CPU = IRQ_PER_CPU,
_IRQ_LEVEL = IRQ_LEVEL,
_IRQ_NOPROBE = IRQ_NOPROBE,
_IRQ_NOREQUEST = IRQ_NOREQUEST,
_IRQ_NOAUTOEN = IRQ_NOAUTOEN,
_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
#define IRQ_INPROGRESS GOT_YOU_MORON
#define IRQ_REPLAY GOT_YOU_MORON
#define IRQ_WAITING GOT_YOU_MORON
#define IRQ_DISABLED GOT_YOU_MORON
#define IRQ_PENDING GOT_YOU_MORON
#define IRQ_MASKED GOT_YOU_MORON
#define IRQ_WAKEUP GOT_YOU_MORON
#define IRQ_MOVE_PENDING GOT_YOU_MORON
#define IRQ_PER_CPU GOT_YOU_MORON
#define IRQ_NO_BALANCING GOT_YOU_MORON
#define IRQ_AFFINITY_SET GOT_YOU_MORON
#define IRQ_LEVEL GOT_YOU_MORON
#define IRQ_NOPROBE GOT_YOU_MORON
#define IRQ_NOREQUEST GOT_YOU_MORON
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
static inline void
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
{
desc->status &= ~(clr & _IRQF_MODIFY_MASK);
desc->status |= (set & _IRQF_MODIFY_MASK);
}
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status & _IRQ_PER_CPU;
}
static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
{
desc->status |= _IRQ_PER_CPU;
}
static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
{
desc->status |= _IRQ_NO_BALANCING;
}
static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
{
return desc->status & _IRQ_NO_BALANCING;
}
static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
{
return desc->status & IRQ_TYPE_SENSE_MASK;
}
static inline void
irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
{
desc->status &= ~IRQ_TYPE_SENSE_MASK;
desc->status |= mask & IRQ_TYPE_SENSE_MASK;
}
static inline bool irq_settings_is_level(struct irq_desc *desc)
{
return desc->status & _IRQ_LEVEL;
}
static inline void irq_settings_clr_level(struct irq_desc *desc)
{
desc->status &= ~_IRQ_LEVEL;
}
static inline void irq_settings_set_level(struct irq_desc *desc)
{
desc->status |= _IRQ_LEVEL;
}
static inline bool irq_settings_can_request(struct irq_desc *desc)
{
return !(desc->status & _IRQ_NOREQUEST);
}
static inline void irq_settings_clr_norequest(struct irq_desc *desc)
{
desc->status &= ~_IRQ_NOREQUEST;
}
static inline void irq_settings_set_norequest(struct irq_desc *desc)
{
desc->status |= _IRQ_NOREQUEST;
}
static inline bool irq_settings_can_probe(struct irq_desc *desc)
{
return !(desc->status & _IRQ_NOPROBE);
}
static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
{
desc->status &= ~_IRQ_NOPROBE;
}
static inline void irq_settings_set_noprobe(struct irq_desc *desc)
{
desc->status |= _IRQ_NOPROBE;
}
static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
{
return desc->status & _IRQ_MOVE_PCNTXT;
}
static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
{
return !(desc->status & _IRQ_NOAUTOEN);
}
static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
{
return desc->status & _IRQ_NESTED_THREAD;
}
/* Nothing should touch desc->status from now on */
#undef status
#define status USE_THE_PROPER_WRAPPERS_YOU_MORON
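
The GOT_YOU_MORON defines poison the old status bit names inside kernel/irq/, so any straggling direct access breaks the build and has to be converted to the wrappers above. For instance (hypothetical call site):

	static void my_mark_level(struct irq_desc *desc)
	{
		/*
		 * Old style no longer compiles here:
		 *	desc->status |= IRQ_LEVEL;
		 * both "status" and IRQ_LEVEL now expand to poison tokens.
		 */
		irq_settings_set_level(desc);
	}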
......
......@@ -2286,7 +2286,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* yield - it could be a while.
*/
if (unlikely(on_rq)) {
schedule_timeout_uninterruptible(1);
ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&to, HRTIMER_MODE_REL);
continue;
}
......
......@@ -311,9 +311,21 @@ void irq_enter(void)
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq() __do_softirq()
static inline void invoke_softirq(void)
{
if (!force_irqthreads)
__do_softirq();
else
wakeup_softirqd();
}
#else
# define invoke_softirq() do_softirq()
static inline void invoke_softirq(void)
{
if (!force_irqthreads)
do_softirq();
else
wakeup_softirqd();
}
#endif
/*
......@@ -737,7 +749,10 @@ static int run_ksoftirqd(void * __bind_cpu)
don't process */
if (cpu_is_offline((long)__bind_cpu))
goto wait_to_die;
do_softirq();
local_irq_disable();
if (local_softirq_pending())
__do_softirq();
local_irq_enable();
preempt_enable_no_resched();
cond_resched();
preempt_disable();
......