Commit 57b150cc authored by Yinghai Lu, committed by Ingo Molnar

irq: only update affinity if ->set_affinity() is successful

irq_set_affinity() and move_masked_irq() assign the affinity mask
before calling the chip's ->set_affinity() method; some architectures
then assign it again inside ->set_affinity().

We do something like:

 cpumask_copy(desc->affinity, mask);
 desc->chip->set_affinity(irq, mask);

But in the failure path, affinity should not be touched - otherwise
we'll end up with a different affinity mask despite the failure to
migrate the IRQ.

So update the affinity only if ->set_affinity() returns 0, and call
irq_set_thread_affinity() accordingly.
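
A minimal sketch of the fixed pattern (set_affinity_checked() is a
hypothetical helper used only for illustration, assuming the usual
declarations from <linux/irq.h> and <linux/cpumask.h>; it is not part
of this patch):

 /*
  * Only publish the new mask and update the handler thread's affinity
  * once the chip has accepted the change.
  */
 static int set_affinity_checked(struct irq_desc *desc, unsigned int irq,
                                 const struct cpumask *cpumask)
 {
         int ret = desc->chip->set_affinity(irq, cpumask);

         if (ret)
                 return ret;                     /* failure: desc->affinity stays untouched */

         cpumask_copy(desc->affinity, cpumask);  /* success: record the new mask */
         irq_set_thread_affinity(desc, cpumask); /* and propagate it to the irq thread */
         return 0;
 }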

v2: update after "irq, x86: Remove IRQ_DISABLED check in process context IRQ move"
v3: per Ingo's suggestion, change set_affinity() in irq_chip to return int.
v4: update comments by removing moving irq_desc code.

[ Impact: fix /proc/irq/*/smp_affinity setting corner case bug ]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <49F65509.60307@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d5dedd45
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,6 +42,9 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
+extern void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+
 /*
  * Debugging printout:
  */
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,7 +80,7 @@ int irq_can_set_affinity(unsigned int irq)
         return 1;
 }
 
-static void
+void
 irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
 {
         struct irqaction *action = desc->action;
@@ -109,17 +109,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-        if (desc->status & IRQ_MOVE_PCNTXT)
-                desc->chip->set_affinity(irq, cpumask);
+        if (desc->status & IRQ_MOVE_PCNTXT) {
+                if (!desc->chip->set_affinity(irq, cpumask)) {
+                        cpumask_copy(desc->affinity, cpumask);
+                        irq_set_thread_affinity(desc, cpumask);
+                }
+        }
         else {
                 desc->status |= IRQ_MOVE_PENDING;
                 cpumask_copy(desc->pending_mask, cpumask);
         }
 #else
-        cpumask_copy(desc->affinity, cpumask);
-        desc->chip->set_affinity(irq, cpumask);
-#endif
-        irq_set_thread_affinity(desc, cpumask);
+        if (!desc->chip->set_affinity(irq, cpumask)) {
+                cpumask_copy(desc->affinity, cpumask);
+                irq_set_thread_affinity(desc, cpumask);
+        }
+#endif
         desc->status |= IRQ_AFFINITY_SET;
         spin_unlock_irqrestore(&desc->lock, flags);
         return 0;
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,5 +1,8 @@
 
 #include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
 
 void move_masked_irq(int irq)
 {
@@ -39,11 +42,12 @@ void move_masked_irq(int irq)
          * masking the irqs.
          */
         if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-                   < nr_cpu_ids)) {
-                cpumask_and(desc->affinity,
-                            desc->pending_mask, cpu_online_mask);
-                desc->chip->set_affinity(irq, desc->affinity);
-        }
+                   < nr_cpu_ids))
+                if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+                        cpumask_copy(desc->affinity, desc->pending_mask);
+                        irq_set_thread_affinity(desc, desc->pending_mask);
+                }
+
         cpumask_clear(desc->pending_mask);
 }
 
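
For context, a minimal chip-side sketch of the int-returning
->set_affinity() convention this patch relies on. example_chip and
example_hw_route_irq are hypothetical names and the hardware
programming is stubbed out; a negative return tells the core code
above to skip the cpumask_copy()/irq_set_thread_affinity() update, so
desc->affinity keeps its old value.

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical stand-in for programming the interrupt routing hardware. */
static void example_hw_route_irq(unsigned int irq, const struct cpumask *dest)
{
        /* write the routing registers for 'irq' here */
}

/* Report failure with a negative errno so the core skips the affinity update. */
static int example_chip_set_affinity(unsigned int irq, const struct cpumask *dest)
{
        if (cpumask_any_and(dest, cpu_online_mask) >= nr_cpu_ids)
                return -EINVAL;         /* no online CPU in mask: keep old affinity */

        example_hw_route_irq(irq, dest);
        return 0;                       /* success: core records the new mask */
}

static struct irq_chip example_chip = {
        .name           = "example",
        .set_affinity   = example_chip_set_affinity,
        /* .startup, .mask, .unmask, .ack, ... omitted */
};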