Commit f53c027a authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] ia64 cpu hotplug: IRQ affinity work

From: Ashok Raj <ashok.raj@intel.com>

Setting irq affinity via /proc used to force immediate IOSAPIC RTE reprogramming.
The correct approach is to defer the reprogramming until an interrupt is pending.
parent 68a50f57
...@@ -98,6 +98,7 @@ ...@@ -98,6 +98,7 @@
#endif #endif
static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED; static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED;
cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */ /* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
...@@ -331,6 +332,21 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) ...@@ -331,6 +332,21 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
#endif #endif
} }
/*
 * Apply any IRQ affinity change that was queued (via the /proc
 * smp_affinity write path) while deferring the actual IOSAPIC
 * reprogramming until an interrupt for this irq is being handled.
 * Called from the level-triggered ->end and edge-triggered ->ack
 * handlers; note - we hold desc->lock (per the comment below).
 */
static inline void move_irq(int irq)
{
/* note - we hold desc->lock */
cpumask_t tmp;
irq_desc_t *desc = irq_descp(irq);
/* A non-empty pending mask means a new affinity was requested. */
if (!cpus_empty(pending_irq_cpumask[irq])) {
/* Only honor the request if it targets at least one online CPU. */
cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
if (unlikely(!cpus_empty(tmp))) {
/* Program the full requested mask, not the online subset. */
desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
}
/* Request consumed (or dropped if no target CPU was online). */
cpus_clear(pending_irq_cpumask[irq]);
}
}
/* /*
* Handlers for level-triggered interrupts. * Handlers for level-triggered interrupts.
*/ */
...@@ -347,6 +363,7 @@ iosapic_end_level_irq (unsigned int irq) ...@@ -347,6 +363,7 @@ iosapic_end_level_irq (unsigned int irq)
{ {
ia64_vector vec = irq_to_vector(irq); ia64_vector vec = irq_to_vector(irq);
move_irq(irq);
writel(vec, iosapic_intr_info[vec].addr + IOSAPIC_EOI); writel(vec, iosapic_intr_info[vec].addr + IOSAPIC_EOI);
} }
...@@ -386,6 +403,8 @@ static void ...@@ -386,6 +403,8 @@ static void
iosapic_ack_edge_irq (unsigned int irq) iosapic_ack_edge_irq (unsigned int irq)
{ {
irq_desc_t *idesc = irq_descp(irq); irq_desc_t *idesc = irq_descp(irq);
move_irq(irq);
/* /*
* Once we have recorded IRQ_PENDING already, we can mask the * Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled * interrupt for real. This prevents IRQ storms from unhandled
......
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/irq.h> #include <asm/irq.h>
extern cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
/* /*
* Linux has a controller-independent x86 interrupt architecture. * Linux has a controller-independent x86 interrupt architecture.
...@@ -938,7 +938,9 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) ...@@ -938,7 +938,9 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
static int irq_affinity_read_proc (char *page, char **start, off_t off, static int irq_affinity_read_proc (char *page, char **start, off_t off,
int count, int *eof, void *data) int count, int *eof, void *data)
{ {
int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]); int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");
len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
if (count - len < 2) if (count - len < 2)
return -EINVAL; return -EINVAL;
len += sprintf(page + len, "\n"); len += sprintf(page + len, "\n");
...@@ -956,6 +958,7 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer, ...@@ -956,6 +958,7 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
int rlen; int rlen;
int prelen; int prelen;
irq_desc_t *desc = irq_descp(irq); irq_desc_t *desc = irq_descp(irq);
unsigned long flags;
if (!desc->handler->set_affinity) if (!desc->handler->set_affinity)
return -EIO; return -EIO;
...@@ -994,7 +997,10 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer, ...@@ -994,7 +997,10 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
if (cpus_empty(tmp)) if (cpus_empty(tmp))
return -EINVAL; return -EINVAL;
desc->handler->set_affinity(irq, new_value); spin_lock_irqsave(&desc->lock, flags);
pending_irq_cpumask[irq] = new_value;
spin_unlock_irqrestore(&desc->lock, flags);
return full_count; return full_count;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment