Commit 2f1835df authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Ingo Molnar:
 "The changes in this cycle were:

   - Remove the irq timings/variance statistics code that tried to
     predict when the next interrupt would occur; it didn't work out
     as hoped and has been replaced by another mechanism.

   - This new mechanism is the 'array suffix computation' estimate,
     which is superior to the previous one because it can detect not
     just a single periodic pattern but independent periodic patterns,
     using log-2 bucketing and an exponential moving average. The
     comments are longer than the code - and it works better at
     predicting various complex interrupt patterns from real-world
     devices than the previous estimate.

   - Avoid IRQ-work self-IPIs on the local CPU

   - Fix work-list corruption in irq_set_affinity_notifier()"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irq_work: Do not raise an IPI when queueing work on the local CPU
  genirq/devres: Use struct_size() in devm_kzalloc()
  genirq/timings: Add array suffix computation code
  genirq/timings: Remove variance computation code
  genirq: Prevent use-after-free and work list corruption
parents d90dcc1f 471ba0e6
@@ -220,9 +220,8 @@ devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
 			    irq_flow_handler_t handler)
 {
 	struct irq_chip_generic *gc;
-	unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
 
-	gc = devm_kzalloc(dev, sz, GFP_KERNEL);
+	gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL);
 	if (gc)
 		irq_init_generic_chip(gc, name, num_ct,
 				      irq_base, reg_base, handler);
...
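The hunk above drops the open-coded sizeof(*gc) + num_ct * sizeof(struct irq_chip_type) in favour of struct_size() from <linux/overflow.h>, which computes the same size for a structure ending in a flexible array member but saturates at SIZE_MAX instead of silently wrapping if the arithmetic overflows. The following is a plain userspace sketch of the size being computed; demo_chip and demo_type are made-up stand-ins for irq_chip_generic and irq_chip_type, and this is an illustration rather than kernel code.

/*
 * Userspace illustration only: demo_chip/demo_type are made-up stand-ins
 * for irq_chip_generic/irq_chip_type.  The kernel's struct_size() computes
 * the same sizeof(*gc) + n * sizeof(element) value, but with overflow
 * checking on the multiplication and addition.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_type { int regs[8]; };

struct demo_chip {
	const char *name;
	unsigned int num_ct;
	struct demo_type chip_types[];	/* flexible array member */
};

int main(void)
{
	unsigned int num_ct = 3;

	/* Open-coded form, as in the old devm_irq_alloc_generic_chip(). */
	size_t sz = sizeof(struct demo_chip) + num_ct * sizeof(struct demo_type);

	/* One allocation covers the header plus num_ct trailing elements. */
	struct demo_chip *gc = calloc(1, sz);

	printf("allocated %zu bytes for %u trailing chip types\n", sz, num_ct);
	free(gc);
	return 0;
}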
@@ -357,8 +357,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-	if (old_notify)
+	if (old_notify) {
+		cancel_work_sync(&old_notify->work);
 		kref_put(&old_notify->kref, old_notify->release);
+	}
 
 	return 0;
 }
...
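The hunk above addresses the case where the old affinity notifier is released while its notify work is still queued or running: the work callback dereferences the notifier, so dropping the final reference first can free memory the workqueue still points at, and a freed work_struct left on the list corrupts it. Cancelling the work synchronously before kref_put() closes that window. Below is a generic, kernel-style sketch of the pattern; demo_notify is a made-up structure, not code from this merge.

/*
 * Pattern sketch, not kernel source: any object that embeds a
 * work_struct must have that work cancelled (or flushed) before the
 * final kref_put(), otherwise the callback may run on freed memory and
 * the freed work_struct can corrupt the workqueue lists.
 * demo_notify is a made-up stand-in for struct irq_affinity_notify.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_notify {
	struct kref kref;
	struct work_struct work;
};

static void demo_notify_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_notify, kref));
}

static void demo_notify_drop(struct demo_notify *old)
{
	if (old) {
		/* Wait for any queued or running callback to finish ... */
		cancel_work_sync(&old->work);
		/* ... only then is it safe to drop the last reference. */
		kref_put(&old->kref, demo_notify_release);
	}
}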
This diff is collapsed.
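The genirq/timings diff itself is collapsed above. Per the merge message, the new estimator buckets observed interrupt intervals on a log-2 scale and keeps an exponential moving average, so that independent periodic sources can be tracked separately. The sketch below only illustrates that bucketing and EMA bookkeeping in plain userspace C; it is not the kernel implementation, the suffix-search part of the algorithm is not shown, and NR_BUCKETS and EMA_SHIFT are arbitrary values chosen for the demo.

/*
 * Illustration only: quantize interrupt intervals onto a log-2 scale
 * and keep a per-bucket exponential moving average, roughly as the
 * merge message describes.  Not the genirq/timings implementation;
 * NR_BUCKETS and EMA_SHIFT are arbitrary values for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS	32
#define EMA_SHIFT	3		/* EMA weight of 1/8 for new samples */

static uint64_t ema[NR_BUCKETS];	/* per-bucket average interval, in ns */

static unsigned int interval_to_bucket(uint64_t delta_ns)
{
	unsigned int b = 0;

	while (delta_ns > 1 && b < NR_BUCKETS - 1) {	/* roughly ilog2() */
		delta_ns >>= 1;
		b++;
	}
	return b;
}

static void record_interval(uint64_t delta_ns)
{
	unsigned int b = interval_to_bucket(delta_ns);

	if (!ema[b])
		ema[b] = delta_ns;
	else	/* avg += (sample - avg) / 2^EMA_SHIFT */
		ema[b] += ((int64_t)delta_ns - (int64_t)ema[b]) / (1 << EMA_SHIFT);
}

int main(void)
{
	/* Two interleaved periodic sources: ~1 us and ~64 us intervals. */
	uint64_t samples[] = { 1000, 64000, 1020, 63500, 980, 64200, 1010 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		record_interval(samples[i]);

	for (unsigned int b = 0; b < NR_BUCKETS; b++)
		if (ema[b])
			printf("bucket %2u: avg interval %llu ns\n",
			       b, (unsigned long long)ema[b]);
	return 0;
}

The two interleaved sources land in different log-2 buckets, which is the property that lets the new estimator follow independent periodic patterns instead of blending them into one variance figure.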
@@ -56,6 +56,36 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
+{
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
+}
+
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return false;
+
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
+	__irq_work_queue_local(work);
+	preempt_enable();
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue);
+
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
@@ -64,53 +94,32 @@ void __weak arch_irq_work_raise(void)
  */
 bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
-
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
-
-	return true;
-}
-
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
-{
-	/* Only queue if not already pending */
-	if (!irq_work_claim(work))
-		return false;
-
-	/* Queue the entry and raise the IPI if needed. */
-	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	preempt_disable();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+		__irq_work_queue_local(work);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
 
 bool irq_work_needs_cpu(void)
 {
...
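With the change above, irq_work_queue_on() no longer goes through the remote function-call IPI path when the target CPU happens to be the local one; it takes the same raised/lazy-list route as irq_work_queue() via __irq_work_queue_local(). A minimal caller-side sketch follows; the demo_* names are made up and this is not code from this merge.

/*
 * Caller-side sketch with made-up names (not from this merge): after
 * the change, queueing on the local CPU uses __irq_work_queue_local()
 * and arch_irq_work_raise(), while a genuinely remote CPU is still
 * poked with arch_send_call_function_single_ipi().
 */
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_irq_work_fn(struct irq_work *work)
{
	/* Runs from irq_work context on the CPU the work was queued on. */
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work demo_work;

static void demo_init(void)
{
	init_irq_work(&demo_work, demo_irq_work_fn);
}

static void demo_kick(int target_cpu)
{
	/* Returns false if the work was already claimed and is pending. */
	if (!irq_work_queue_on(&demo_work, target_cpu))
		pr_debug("demo_work already pending\n");
}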