Commit 2b34218e authored by Linus Torvalds

Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Ingo Molnar:
 "A CPU hotplug related fix, plus two related sanity checks"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/cpuhotplug: Enforce affinity setting on startup of managed irqs
  genirq/cpuhotplug: Add sanity check for effective affinity mask
  genirq: Warn when effective affinity is not updated
parents a515d05e e43b3b58

@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 		irq_setup_affinity(desc);
 		break;
 	case IRQ_STARTUP_MANAGED:
-		irq_do_set_affinity(d, aff, false);
 		ret = __irq_startup(desc);
+		irq_set_affinity_locked(d, aff, false);
 		break;
 	case IRQ_STARTUP_ABORT:
 		return 0;
...

@@ -18,8 +18,34 @@
 static inline bool irq_needs_fixup(struct irq_data *d)
 {
 	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+	unsigned int cpu = smp_processor_id();

-	return cpumask_test_cpu(smp_processor_id(), m);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	/*
+	 * The cpumask_empty() check is a workaround for interrupt chips,
+	 * which do not implement effective affinity, but the architecture has
+	 * enabled the config switch. Use the general affinity mask instead.
+	 */
+	if (cpumask_empty(m))
+		m = irq_data_get_affinity_mask(d);
+
+	/*
+	 * Sanity check. If the mask is not empty when excluding the outgoing
+	 * CPU then it must contain at least one online CPU. The outgoing CPU
+	 * has been removed from the online mask already.
+	 */
+	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
+	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * If this happens then there was a missed IRQ fixup at some
+		 * point. Warn about it and enforce fixup.
+		 */
+		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
+			cpumask_pr_args(m), d->irq, cpu);
+		return true;
+	}
+#endif
+	return cpumask_test_cpu(cpu, m);
 }

 static bool migrate_one_irq(struct irq_desc *desc)
...
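
The new check in irq_needs_fixup() boils down to bitmask logic: the outgoing CPU has already been cleared from the online mask, so if the effective affinity mask still names other CPUs but none of them are online, an earlier fixup was missed and migration is enforced now. Below is a minimal userspace sketch of that decision (not kernel code: plain unsigned long bitmasks stand in for struct cpumask, the helper names are invented, and the empty-mask fallback to the general affinity mask is omitted):

/* Illustrative only -- mirrors the decision in irq_needs_fixup() above. */
#include <stdbool.h>
#include <stdio.h>

/* True if 'mask' contains any CPU other than 'cpu'. */
static bool mask_has_other_cpu(unsigned long mask, unsigned int cpu)
{
	return (mask & ~(1UL << cpu)) != 0;
}

static bool needs_fixup(unsigned long effective, unsigned long online,
			unsigned int outgoing_cpu)
{
	/*
	 * The outgoing CPU is already gone from 'online'. If the effective
	 * mask still names other CPUs but none of them are online, an
	 * earlier fixup was missed: warn and force migration.
	 */
	if (mask_has_other_cpu(effective, outgoing_cpu) &&
	    (effective & online) == 0) {
		fprintf(stderr, "effective mask contains only offline CPUs\n");
		return true;
	}

	/* Normal case: migrate only if the outgoing CPU is in the mask. */
	return (effective >> outgoing_cpu) & 1;
}

int main(void)
{
	/* IRQ targets CPUs 2 and 3, CPU 3 goes down, CPU 2 stays online. */
	printf("%d\n", needs_fixup(0xc, 0x7, 3));	/* 1: migrate */
	/* IRQ targets CPUs 1 and 2, but both are already offline. */
	printf("%d\n", needs_fixup(0x6, 0x1, 2));	/* 1: missed fixup */
	/* IRQ targets CPU 1, which stays online. */
	printf("%d\n", needs_fixup(0x2, 0x7, 3));	/* 0: nothing to do */
	return 0;
}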

@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 }

+static void irq_validate_effective_affinity(struct irq_data *data)
+{
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+	if (!cpumask_empty(m))
+		return;
+	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
+		     chip->name, data->irq);
+#endif
+}
+
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 			bool force)
 {
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	int ret;

+	if (!chip || !chip->irq_set_affinity)
+		return -EINVAL;
+
 	ret = chip->irq_set_affinity(data, mask, force);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
 		cpumask_copy(desc->irq_common_data.affinity, mask);
 	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_validate_effective_affinity(data);
 		irq_set_thread_affinity(desc);
 		ret = 0;
 	}
...
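
The companion warning in irq_do_set_affinity() fires when a chip's irq_set_affinity() callback leaves the effective affinity mask empty even though the architecture enables CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK. What the core expects from a driver is roughly the following sketch; my_chip_set_affinity() and my_chip_write_target() are hypothetical names, while cpumask_first_and(), cpumask_of() and irq_data_update_effective_affinity() are the existing genirq helpers:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical hardware access: steer hwirq delivery to one CPU. */
static void my_chip_write_target(irq_hw_number_t hwirq, unsigned int cpu)
{
	/* A real driver would poke an affinity/target register here. */
}

static int my_chip_set_affinity(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	my_chip_write_target(d->hwirq, cpu);

	/*
	 * Record where the interrupt actually ends up, so the core's
	 * irq_validate_effective_affinity() check stays quiet.
	 */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}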