Commit 8047e247 authored by David S. Miller

[SPARC64]: Virtualize IRQ numbers.

Inspired by PowerPC XICS interrupt support code.

All IRQs are virtualized in order to keep NR_IRQS from needing
to be too large.  Interrupts on sparc64 are arbitrary 11-bit
values, but we don't need to define NR_IRQS to 2048 if we
virtualize the IRQs.

As PCI and SBUS controller drivers build device IRQs, we divvy
out virtual IRQ numbers incrementally, starting at 1.  Zero is
a special virtual IRQ reserved for the timer interrupt.

So device drivers all see virtual IRQs, and all the normal
interfaces such as request_irq(), enable_irq(), etc. translate
that into a real IRQ number in order to configure the IRQ.

At this point, knowledge of struct ino_bucket is almost entirely
contained within arch/sparc64/kernel/irq.c.  There are a few
small bits in the PCI controller drivers that need to be swept
away before we can move ino_bucket's definition out of
asm-sparc64/irq.h and make it private to kernel/irq.c.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 37cdcd9e
...@@ -70,7 +70,10 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY ...@@ -70,7 +70,10 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
*/ */
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist) #define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
static struct irqaction *irq_action[NR_IRQS]; static struct irqaction timer_irq_action = {
.name = "timer",
};
static struct irqaction *irq_action[NR_IRQS] = { &timer_irq_action, };
/* This only synchronizes entities which modify IRQ handler /* This only synchronizes entities which modify IRQ handler
* state and some selected user-level spots that want to * state and some selected user-level spots that want to
...@@ -79,6 +82,59 @@ static struct irqaction *irq_action[NR_IRQS]; ...@@ -79,6 +82,59 @@ static struct irqaction *irq_action[NR_IRQS];
*/ */
static DEFINE_SPINLOCK(irq_action_lock); static DEFINE_SPINLOCK(irq_action_lock);
static unsigned int virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_cur = 1;
static unsigned char virt_irq_alloc(unsigned int real_irq)
{
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
ent = virt_irq_cur;
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
return 0;
}
virt_irq_cur = ent + 1;
virt_to_real_irq_table[ent] = real_irq;
return ent;
}
#if 0 /* Currently unused. */
static unsigned char real_to_virt_irq(unsigned int real_irq)
{
struct ino_bucket *bucket = __bucket(real_irq);
return bucket->virt_irq;
}
#endif
static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
return virt_to_real_irq_table[virt_irq];
}
void irq_install_pre_handler(int virt_irq,
void (*func)(struct ino_bucket *, void *, void *),
void *arg1, void *arg2)
{
unsigned int real_irq = virt_to_real_irq(virt_irq);
struct ino_bucket *bucket;
struct irq_desc *d;
if (unlikely(!real_irq))
return;
bucket = __bucket(real_irq);
d = bucket->irq_info;
d->pre_handler = func;
d->pre_handler_arg1 = arg1;
d->pre_handler_arg2 = arg2;
}
static void register_irq_proc (unsigned int irq); static void register_irq_proc (unsigned int irq);
/* /*
...@@ -164,14 +220,18 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) ...@@ -164,14 +220,18 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
return tid; return tid;
} }
/* Now these are always passed a true fully specified sun4u INO. */ void enable_irq(unsigned int virt_irq)
void enable_irq(unsigned int irq)
{ {
struct ino_bucket *bucket = __bucket(irq); unsigned int real_irq = virt_to_real_irq(virt_irq);
struct ino_bucket *bucket;
unsigned long imap, cpuid; unsigned long imap, cpuid;
if (unlikely(!real_irq))
return;
bucket = __bucket(real_irq);
imap = bucket->imap; imap = bucket->imap;
if (imap == 0UL) if (unlikely(imap == 0UL))
return; return;
preempt_disable(); preempt_disable();
...@@ -182,7 +242,7 @@ void enable_irq(unsigned int irq) ...@@ -182,7 +242,7 @@ void enable_irq(unsigned int irq)
cpuid = real_hard_smp_processor_id(); cpuid = real_hard_smp_processor_id();
if (tlb_type == hypervisor) { if (tlb_type == hypervisor) {
unsigned int ino = __irq_ino(irq); unsigned int ino = __irq_ino(real_irq);
int err; int err;
err = sun4v_intr_settarget(ino, cpuid); err = sun4v_intr_settarget(ino, cpuid);
...@@ -211,16 +271,22 @@ void enable_irq(unsigned int irq) ...@@ -211,16 +271,22 @@ void enable_irq(unsigned int irq)
preempt_enable(); preempt_enable();
} }
/* This now gets passed true ino's as well. */ void disable_irq(unsigned int virt_irq)
void disable_irq(unsigned int irq)
{ {
struct ino_bucket *bucket = __bucket(irq); unsigned int real_irq = virt_to_real_irq(virt_irq);
struct ino_bucket *bucket;
unsigned long imap; unsigned long imap;
if (unlikely(!real_irq))
return;
bucket = __bucket(real_irq);
imap = bucket->imap; imap = bucket->imap;
if (imap != 0UL) { if (unlikely(imap == 0UL))
return;
if (tlb_type == hypervisor) { if (tlb_type == hypervisor) {
unsigned int ino = __irq_ino(irq); unsigned int ino = __irq_ino(real_irq);
int err; int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
...@@ -239,7 +305,6 @@ void disable_irq(unsigned int irq) ...@@ -239,7 +305,6 @@ void disable_irq(unsigned int irq)
tmp &= ~IMAP_VALID; tmp &= ~IMAP_VALID;
upa_writel(tmp, imap); upa_writel(tmp, imap);
} }
}
} }
static void build_irq_error(const char *msg, unsigned int ino, int inofixup, static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
...@@ -253,14 +318,14 @@ static void build_irq_error(const char *msg, unsigned int ino, int inofixup, ...@@ -253,14 +318,14 @@ static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
prom_halt(); prom_halt();
} }
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags)
{ {
struct ino_bucket *bucket; struct ino_bucket *bucket;
int ino; int ino;
BUG_ON(tlb_type == hypervisor); BUG_ON(tlb_type == hypervisor);
/* RULE: Both must be specified in all other cases. */ /* RULE: Both must be specified. */
if (iclr == 0UL || imap == 0UL) { if (iclr == 0UL || imap == 0UL) {
prom_printf("Invalid build_irq %d %016lx %016lx\n", prom_printf("Invalid build_irq %d %016lx %016lx\n",
inofixup, iclr, imap); inofixup, iclr, imap);
...@@ -298,10 +363,12 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) ...@@ -298,10 +363,12 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
*/ */
bucket->imap = imap; bucket->imap = imap;
bucket->iclr = iclr; bucket->iclr = iclr;
bucket->flags = 0; if (!bucket->virt_irq)
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
bucket->flags = flags;
out: out:
return __irq(bucket); return bucket->virt_irq;
} }
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags) unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
...@@ -322,7 +389,8 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char f ...@@ -322,7 +389,8 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char f
*/ */
bucket->imap = ~0UL - sysino; bucket->imap = ~0UL - sysino;
bucket->iclr = ~0UL - sysino; bucket->iclr = ~0UL - sysino;
if (!bucket->virt_irq)
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
bucket->flags = flags; bucket->flags = flags;
bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
...@@ -331,7 +399,7 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char f ...@@ -331,7 +399,7 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char f
prom_halt(); prom_halt();
} }
return __irq(bucket); return bucket->virt_irq;
} }
static void atomic_bucket_insert(struct ino_bucket *bucket) static void atomic_bucket_insert(struct ino_bucket *bucket)
...@@ -390,37 +458,42 @@ static struct irqaction *get_action_slot(struct ino_bucket *bucket) ...@@ -390,37 +458,42 @@ static struct irqaction *get_action_slot(struct ino_bucket *bucket)
return NULL; return NULL;
} }
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), int request_irq(unsigned int virt_irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char *name, void *dev_id) unsigned long irqflags, const char *name, void *dev_id)
{ {
struct irqaction *action; struct irqaction *action;
struct ino_bucket *bucket = __bucket(irq); struct ino_bucket *bucket;
unsigned long flags; unsigned long flags;
unsigned int real_irq;
int pending = 0; int pending = 0;
real_irq = virt_to_real_irq(virt_irq);
if (unlikely(!real_irq))
return -EINVAL;
if (unlikely(!handler)) if (unlikely(!handler))
return -EINVAL; return -EINVAL;
bucket = __bucket(real_irq);
if (unlikely(!bucket->irq_info)) if (unlikely(!bucket->irq_info))
return -ENODEV; return -ENODEV;
if (irqflags & SA_SAMPLE_RANDOM) { if (irqflags & SA_SAMPLE_RANDOM) {
/* /*
* This function might sleep, we want to call it first, * This function might sleep, we want to call it first,
* outside of the atomic block. In SA_STATIC_ALLOC case, * outside of the atomic block.
* random driver's kmalloc will fail, but it is safe.
* If already initialized, random driver will not reinit.
* Yes, this might clear the entropy pool if the wrong * Yes, this might clear the entropy pool if the wrong
* driver is attempted to be loaded, without actually * driver is attempted to be loaded, without actually
* installing a new handler, but is this really a problem, * installing a new handler, but is this really a problem,
* only the sysadmin is able to do this. * only the sysadmin is able to do this.
*/ */
rand_initialize_irq(PIL_DEVICE_IRQ); rand_initialize_irq(virt_irq);
} }
spin_lock_irqsave(&irq_action_lock, flags); spin_lock_irqsave(&irq_action_lock, flags);
if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) { if (check_irq_sharing(virt_irq, irqflags)) {
spin_unlock_irqrestore(&irq_action_lock, flags); spin_unlock_irqrestore(&irq_action_lock, flags);
return -EBUSY; return -EBUSY;
} }
...@@ -441,12 +514,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ ...@@ -441,12 +514,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
action->name = name; action->name = name;
action->next = NULL; action->next = NULL;
action->dev_id = dev_id; action->dev_id = dev_id;
put_ino_in_irqaction(action, irq); put_ino_in_irqaction(action, __irq_ino(real_irq));
put_smpaff_in_irqaction(action, CPU_MASK_NONE); put_smpaff_in_irqaction(action, CPU_MASK_NONE);
append_irq_action(PIL_DEVICE_IRQ, action); append_irq_action(virt_irq, action);
enable_irq(irq); enable_irq(virt_irq);
/* We ate the IVEC already, this makes sure it does not get lost. */ /* We ate the IVEC already, this makes sure it does not get lost. */
if (pending) { if (pending) {
...@@ -456,7 +529,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ ...@@ -456,7 +529,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
spin_unlock_irqrestore(&irq_action_lock, flags); spin_unlock_irqrestore(&irq_action_lock, flags);
register_irq_proc(__irq_ino(irq)); register_irq_proc(virt_irq);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
distribute_irqs(); distribute_irqs();
...@@ -466,17 +539,17 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ ...@@ -466,17 +539,17 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(request_irq);
static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id) static struct irqaction *unlink_irq_action(unsigned int virt_irq, void *dev_id)
{ {
struct irqaction *action, **pp; struct irqaction *action, **pp;
pp = irq_action + PIL_DEVICE_IRQ; pp = irq_action + virt_irq;
action = *pp; action = *pp;
if (unlikely(!action)) if (unlikely(!action))
return NULL; return NULL;
if (unlikely(!action->handler)) { if (unlikely(!action->handler)) {
printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ); printk("Freeing free IRQ %d\n", virt_irq);
return NULL; return NULL;
} }
...@@ -491,28 +564,33 @@ static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id) ...@@ -491,28 +564,33 @@ static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
return action; return action;
} }
void free_irq(unsigned int irq, void *dev_id) void free_irq(unsigned int virt_irq, void *dev_id)
{ {
struct irqaction *action; struct irqaction *action;
struct ino_bucket *bucket; struct ino_bucket *bucket;
struct irq_desc *desc; struct irq_desc *desc;
unsigned long flags; unsigned long flags;
unsigned int real_irq;
int ent, i; int ent, i;
real_irq = virt_to_real_irq(virt_irq);
if (unlikely(!real_irq))
return;
spin_lock_irqsave(&irq_action_lock, flags); spin_lock_irqsave(&irq_action_lock, flags);
action = unlink_irq_action(irq, dev_id); action = unlink_irq_action(virt_irq, dev_id);
spin_unlock_irqrestore(&irq_action_lock, flags); spin_unlock_irqrestore(&irq_action_lock, flags);
if (unlikely(!action)) if (unlikely(!action))
return; return;
synchronize_irq(irq); synchronize_irq(virt_irq);
spin_lock_irqsave(&irq_action_lock, flags); spin_lock_irqsave(&irq_action_lock, flags);
bucket = __bucket(irq); bucket = __bucket(real_irq);
desc = bucket->irq_info; desc = bucket->irq_info;
for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
...@@ -545,7 +623,7 @@ void free_irq(unsigned int irq, void *dev_id) ...@@ -545,7 +623,7 @@ void free_irq(unsigned int irq, void *dev_id)
* the same IMAP are active. * the same IMAP are active.
*/ */
if (ent == NUM_IVECS) if (ent == NUM_IVECS)
disable_irq(irq); disable_irq(virt_irq);
} }
spin_unlock_irqrestore(&irq_action_lock, flags); spin_unlock_irqrestore(&irq_action_lock, flags);
...@@ -554,10 +632,15 @@ void free_irq(unsigned int irq, void *dev_id) ...@@ -554,10 +632,15 @@ void free_irq(unsigned int irq, void *dev_id)
EXPORT_SYMBOL(free_irq); EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq) void synchronize_irq(unsigned int virt_irq)
{ {
struct ino_bucket *bucket = __bucket(irq); unsigned int real_irq = virt_to_real_irq(virt_irq);
struct ino_bucket *bucket;
if (unlikely(!real_irq))
return;
bucket = __bucket(real_irq);
#if 0 #if 0
/* The following is how I wish I could implement this. /* The following is how I wish I could implement this.
* Unfortunately the ICLR registers are read-only, you can * Unfortunately the ICLR registers are read-only, you can
...@@ -616,7 +699,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs) ...@@ -616,7 +699,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
action_mask &= ~mask; action_mask &= ~mask;
if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED) if (p->handler(bp->virt_irq, p->dev_id, regs) == IRQ_HANDLED)
random |= p->flags; random |= p->flags;
if (!action_mask) if (!action_mask)
...@@ -637,7 +720,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs) ...@@ -637,7 +720,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
/* Test and add entropy */ /* Test and add entropy */
if (random & SA_SAMPLE_RANDOM) if (random & SA_SAMPLE_RANDOM)
add_interrupt_randomness(PIL_DEVICE_IRQ); add_interrupt_randomness(bp->virt_irq);
out: out:
bp->flags &= ~IBF_INPROGRESS; bp->flags &= ~IBF_INPROGRESS;
} }
...@@ -657,7 +740,7 @@ void timer_irq(int irq, struct pt_regs *regs) ...@@ -657,7 +740,7 @@ void timer_irq(int irq, struct pt_regs *regs)
clear_softint(clr_mask); clear_softint(clr_mask);
irq_enter(); irq_enter();
kstat_this_cpu.irqs[irq]++; kstat_this_cpu.irqs[0]++;
timer_interrupt(irq, NULL, regs); timer_interrupt(irq, NULL, regs);
irq_exit(); irq_exit();
} }
...@@ -1022,12 +1105,12 @@ void __init init_IRQ(void) ...@@ -1022,12 +1105,12 @@ void __init init_IRQ(void)
: "g1"); : "g1");
} }
static struct proc_dir_entry * root_irq_dir; static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS]; static struct proc_dir_entry *irq_dir[NR_IRQS];
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int irq_affinity_read_proc (char *page, char **start, off_t off, static int irq_affinity_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data) int count, int *eof, void *data)
{ {
struct ino_bucket *bp = ivector_table + (long)data; struct ino_bucket *bp = ivector_table + (long)data;
...@@ -1047,11 +1130,20 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off, ...@@ -1047,11 +1130,20 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
return len; return len;
} }
static inline void set_intr_affinity(int irq, cpumask_t hw_aff) static inline void set_intr_affinity(int virt_irq, cpumask_t hw_aff)
{ {
struct ino_bucket *bp = ivector_table + irq; struct ino_bucket *bp;
struct irq_desc *desc = bp->irq_info; struct irq_desc *desc;
struct irqaction *ap = desc->action; struct irqaction *ap;
unsigned int real_irq;
real_irq = virt_to_real_irq(virt_irq);
if (unlikely(!real_irq))
return;
bp = __bucket(real_irq);
desc = bp->irq_info;
ap = desc->action;
/* Users specify affinity in terms of hw cpu ids. /* Users specify affinity in terms of hw cpu ids.
* As soon as we do this, handler_irq() might see and take action. * As soon as we do this, handler_irq() might see and take action.
...@@ -1060,13 +1152,16 @@ static inline void set_intr_affinity(int irq, cpumask_t hw_aff) ...@@ -1060,13 +1152,16 @@ static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
/* Migration is simply done by the next cpu to service this /* Migration is simply done by the next cpu to service this
* interrupt. * interrupt.
*
* XXX Broken, this doesn't happen anymore...
*/ */
} }
static int irq_affinity_write_proc (struct file *file, const char __user *buffer, static int irq_affinity_write_proc(struct file *file,
const char __user *buffer,
unsigned long count, void *data) unsigned long count, void *data)
{ {
int irq = (long) data, full_count = count, err; int virt_irq = (long) data, full_count = count, err;
cpumask_t new_value; cpumask_t new_value;
err = cpumask_parse(buffer, count, new_value); err = cpumask_parse(buffer, count, new_value);
...@@ -1080,7 +1175,7 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer ...@@ -1080,7 +1175,7 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
if (cpus_empty(new_value)) if (cpus_empty(new_value))
return -EINVAL; return -EINVAL;
set_intr_affinity(irq, new_value); set_intr_affinity(virt_irq, new_value);
return full_count; return full_count;
} }
...@@ -1089,18 +1184,18 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer ...@@ -1089,18 +1184,18 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
#define MAX_NAMELEN 10 #define MAX_NAMELEN 10
static void register_irq_proc (unsigned int irq) static void register_irq_proc(unsigned int virt_irq)
{ {
char name [MAX_NAMELEN]; char name [MAX_NAMELEN];
if (!root_irq_dir || irq_dir[irq]) if (!root_irq_dir || irq_dir[virt_irq])
return; return;
memset(name, 0, MAX_NAMELEN); memset(name, 0, MAX_NAMELEN);
sprintf(name, "%x", irq); sprintf(name, "%d", virt_irq);
/* create /proc/irq/1234 */ /* create /proc/irq/1234 */
irq_dir[irq] = proc_mkdir(name, root_irq_dir); irq_dir[virt_irq] = proc_mkdir(name, root_irq_dir);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* XXX SMP affinity not supported on starfire yet. */ /* XXX SMP affinity not supported on starfire yet. */
...@@ -1112,7 +1207,7 @@ static void register_irq_proc (unsigned int irq) ...@@ -1112,7 +1207,7 @@ static void register_irq_proc (unsigned int irq)
if (entry) { if (entry) {
entry->nlink = 1; entry->nlink = 1;
entry->data = (void *)(long)irq; entry->data = (void *)(long)virt_irq;
entry->read_proc = irq_affinity_read_proc; entry->read_proc = irq_affinity_read_proc;
entry->write_proc = irq_affinity_write_proc; entry->write_proc = irq_affinity_write_proc;
} }
...@@ -1120,7 +1215,7 @@ static void register_irq_proc (unsigned int irq) ...@@ -1120,7 +1215,7 @@ static void register_irq_proc (unsigned int irq)
#endif #endif
} }
void init_irq_proc (void) void init_irq_proc(void)
{ {
/* create /proc/irq */ /* create /proc/irq */
root_irq_dir = proc_mkdir("irq", NULL); root_irq_dir = proc_mkdir("irq", NULL);
......
...@@ -280,7 +280,6 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm, ...@@ -280,7 +280,6 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
struct pci_dev *pdev, struct pci_dev *pdev,
unsigned int ino) unsigned int ino)
{ {
struct ino_bucket *bucket;
unsigned long imap, iclr; unsigned long imap, iclr;
unsigned long imap_off, iclr_off; unsigned long imap_off, iclr_off;
int inofixup = 0; int inofixup = 0;
...@@ -309,10 +308,7 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm, ...@@ -309,10 +308,7 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
if ((ino & 0x20) == 0) if ((ino & 0x20) == 0)
inofixup = ino & 0x03; inofixup = ino & 0x03;
bucket = __bucket(build_irq(inofixup, iclr, imap)); return build_irq(inofixup, iclr, imap, IBF_PCI);
bucket->flags |= IBF_PCI;
return __irq(bucket);
} }
/* PSYCHO error handling support. */ /* PSYCHO error handling support. */
......
...@@ -544,10 +544,10 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm, ...@@ -544,10 +544,10 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
struct pci_dev *pdev, struct pci_dev *pdev,
unsigned int ino) unsigned int ino)
{ {
struct ino_bucket *bucket;
unsigned long imap, iclr; unsigned long imap, iclr;
unsigned long imap_off, iclr_off; unsigned long imap_off, iclr_off;
int inofixup = 0; int inofixup = 0;
int virt_irq;
ino &= PCI_IRQ_INO; ino &= PCI_IRQ_INO;
if (ino < SABRE_ONBOARD_IRQ_BASE) { if (ino < SABRE_ONBOARD_IRQ_BASE) {
...@@ -573,23 +573,23 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm, ...@@ -573,23 +573,23 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
if ((ino & 0x20) == 0) if ((ino & 0x20) == 0)
inofixup = ino & 0x03; inofixup = ino & 0x03;
bucket = __bucket(build_irq(inofixup, iclr, imap)); virt_irq = build_irq(inofixup, iclr, imap, IBF_PCI);
bucket->flags |= IBF_PCI;
if (pdev) { if (pdev) {
struct pcidev_cookie *pcp = pdev->sysdata; struct pcidev_cookie *pcp = pdev->sysdata;
if (pdev->bus->number != pcp->pbm->pci_first_busno) { if (pdev->bus->number != pcp->pbm->pci_first_busno) {
struct pci_controller_info *p = pcp->pbm->parent; struct pci_controller_info *p = pcp->pbm->parent;
struct irq_desc *d = bucket->irq_info;
d->pre_handler = sabre_wsync_handler; irq_install_pre_handler(virt_irq,
d->pre_handler_arg1 = pdev; sabre_wsync_handler,
d->pre_handler_arg2 = (void *) pdev,
p->pbm_A.controller_regs + SABRE_WRSYNC; (void *)
p->pbm_A.controller_regs +
SABRE_WRSYNC);
} }
} }
return __irq(bucket); return virt_irq;
} }
/* SABRE error handling support. */ /* SABRE error handling support. */
......
...@@ -270,25 +270,33 @@ static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void ...@@ -270,25 +270,33 @@ static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void
} }
} }
static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm,
unsigned int ino)
{
ino &= PCI_IRQ_INO;
return pbm->pbm_regs + schizo_iclr_offset(ino) + 4;
}
static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm,
unsigned int ino)
{
ino &= PCI_IRQ_INO;
return pbm->pbm_regs + schizo_imap_offset(ino) + 4;
}
static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
struct pci_dev *pdev, struct pci_dev *pdev,
unsigned int ino) unsigned int ino)
{ {
struct ino_bucket *bucket;
unsigned long imap, iclr; unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int ign_fixup; int ign_fixup;
int virt_irq;
ino &= PCI_IRQ_INO; ino &= PCI_IRQ_INO;
imap_off = schizo_imap_offset(ino);
/* Now build the IRQ bucket. */ /* Now build the IRQ bucket. */
imap = pbm->pbm_regs + imap_off; imap = schizo_ino_to_imap(pbm, ino);
imap += 4; iclr = schizo_ino_to_iclr(pbm, ino);
iclr_off = schizo_iclr_offset(ino);
iclr = pbm->pbm_regs + iclr_off;
iclr += 4;
/* On Schizo, no inofixup occurs. This is because each /* On Schizo, no inofixup occurs. This is because each
* INO has it's own IMAP register. On Psycho and Sabre * INO has it's own IMAP register. On Psycho and Sabre
...@@ -305,19 +313,17 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, ...@@ -305,19 +313,17 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
ign_fixup = (1 << 6); ign_fixup = (1 << 6);
} }
bucket = __bucket(build_irq(ign_fixup, iclr, imap)); virt_irq = build_irq(ign_fixup, iclr, imap, IBF_PCI);
bucket->flags |= IBF_PCI;
if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
struct irq_desc *p = bucket->irq_info; irq_install_pre_handler(virt_irq,
tomatillo_wsync_handler,
p->pre_handler = tomatillo_wsync_handler; ((pbm->chip_version <= 4) ?
p->pre_handler_arg1 = ((pbm->chip_version <= 4) ? (void *) 1 : (void *) 0),
(void *) 1 : (void *) 0); (void *) pbm->sync_reg);
p->pre_handler_arg2 = (void *) pbm->sync_reg;
} }
return __irq(bucket); return virt_irq;
} }
/* SCHIZO error handling support. */ /* SCHIZO error handling support. */
...@@ -358,7 +364,6 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino) ...@@ -358,7 +364,6 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq) static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
{ {
struct pci_pbm_info *pbm; struct pci_pbm_info *pbm;
struct ino_bucket *bucket;
unsigned long iclr; unsigned long iclr;
/* Do not clear the interrupt for the other PCI bus. /* Do not clear the interrupt for the other PCI bus.
...@@ -376,11 +381,11 @@ static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq) ...@@ -376,11 +381,11 @@ static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
else else
pbm = &p->pbm_A; pbm = &p->pbm_A;
irq = schizo_irq_build(pbm, NULL, schizo_irq_build(pbm, NULL,
(pbm->portid << 6) | (irq & IMAP_INO)); (pbm->portid << 6) | (irq & IMAP_INO));
bucket = __bucket(irq);
iclr = bucket->iclr;
iclr = schizo_ino_to_iclr(pbm,
(pbm->portid << 6) | (irq & IMAP_INO));
upa_writel(ICLR_IDLE, iclr); upa_writel(ICLR_IDLE, iclr);
} }
...@@ -1125,7 +1130,6 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p) ...@@ -1125,7 +1130,6 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
{ {
struct pci_pbm_info *pbm; struct pci_pbm_info *pbm;
unsigned int irq; unsigned int irq;
struct ino_bucket *bucket;
u64 tmp, err_mask, err_no_mask; u64 tmp, err_mask, err_no_mask;
/* Build IRQs and register handlers. */ /* Build IRQs and register handlers. */
...@@ -1137,8 +1141,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p) ...@@ -1137,8 +1141,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + upa_writel(tmp, (pbm->pbm_regs +
schizo_imap_offset(SCHIZO_UE_INO) + 4)); schizo_imap_offset(SCHIZO_UE_INO) + 4));
...@@ -1150,8 +1153,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p) ...@@ -1150,8 +1153,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + upa_writel(tmp, (pbm->pbm_regs +
schizo_imap_offset(SCHIZO_CE_INO) + 4)); schizo_imap_offset(SCHIZO_CE_INO) + 4));
...@@ -1164,8 +1166,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p) ...@@ -1164,8 +1166,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
tmp = upa_readl(bucket->imap); SCHIZO_PCIERR_A_INO)));
upa_writel(tmp, (pbm->pbm_regs + upa_writel(tmp, (pbm->pbm_regs +
schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4)); schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
...@@ -1178,8 +1180,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p) ...@@ -1178,8 +1180,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
tmp = upa_readl(bucket->imap); SCHIZO_PCIERR_B_INO)));
upa_writel(tmp, (pbm->pbm_regs + upa_writel(tmp, (pbm->pbm_regs +
schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4)); schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
...@@ -1191,8 +1193,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p) ...@@ -1191,8 +1193,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
tmp = upa_readl(bucket->imap); SCHIZO_SERR_INO)));
upa_writel(tmp, (pbm->pbm_regs + upa_writel(tmp, (pbm->pbm_regs +
schizo_imap_offset(SCHIZO_SERR_INO) + 4)); schizo_imap_offset(SCHIZO_SERR_INO) + 4));
...@@ -1263,7 +1265,6 @@ static void schizo_register_error_handlers(struct pci_controller_info *p) ...@@ -1263,7 +1265,6 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
{ {
struct pci_pbm_info *pbm; struct pci_pbm_info *pbm;
unsigned int irq; unsigned int irq;
struct ino_bucket *bucket;
u64 tmp, err_mask, err_no_mask; u64 tmp, err_mask, err_no_mask;
/* Build IRQs and register handlers. */ /* Build IRQs and register handlers. */
...@@ -1275,8 +1276,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p) ...@@ -1275,8 +1276,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4)); upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
pbm = pbm_for_ino(p, SCHIZO_CE_INO); pbm = pbm_for_ino(p, SCHIZO_CE_INO);
...@@ -1287,8 +1287,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p) ...@@ -1287,8 +1287,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4)); upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO); pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
...@@ -1299,8 +1298,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p) ...@@ -1299,8 +1298,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4)); upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO); pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
...@@ -1311,8 +1309,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p) ...@@ -1311,8 +1309,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4)); upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
pbm = pbm_for_ino(p, SCHIZO_SERR_INO); pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
...@@ -1323,8 +1320,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p) ...@@ -1323,8 +1320,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
pbm->name); pbm->name);
prom_halt(); prom_halt();
} }
bucket = __bucket(irq); tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO));
tmp = upa_readl(bucket->imap);
upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4)); upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
/* Enable UE and CE interrupts for controller. */ /* Enable UE and CE interrupts for controller. */
......
...@@ -821,7 +821,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino) ...@@ -821,7 +821,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
iclr += ((unsigned long)sbus_level - 1UL) * 8UL; iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
} }
return build_irq(sbus_level, iclr, imap); return build_irq(sbus_level, iclr, imap, 0);
} }
/* Error interrupt handling. */ /* Error interrupt handling. */
......
...@@ -98,13 +98,22 @@ extern struct ino_bucket ivector_table[NUM_IVECS]; ...@@ -98,13 +98,22 @@ extern struct ino_bucket ivector_table[NUM_IVECS];
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq)) #define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket)) #define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
#define NR_IRQS 16 /* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
* the type of ino_bucket->virt_irq as appropriate.
*
* ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
*/
#define NR_IRQS 255
extern void irq_install_pre_handler(int virt_irq,
void (*func)(struct ino_bucket *, void *, void *),
void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq) #define irq_canonicalize(irq) (irq)
extern void disable_irq(unsigned int); extern void disable_irq(unsigned int);
#define disable_irq_nosync disable_irq #define disable_irq_nosync disable_irq
extern void enable_irq(unsigned int); extern void enable_irq(unsigned int);
extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap); extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags);
extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags); extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags);
extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment