Commit 21c80c9f authored by Keith Busch, committed by Bjorn Helgaas

x86/PCI: VMD: Fix infinite loop executing irq's

We can't initialize the list head on deletion as this causes the node to
point to itself, which causes an infinite loop if vmd_irq() happens to be
servicing that node.
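
For illustration, here is a minimal standalone sketch of the failure. It is a userspace approximation, not the kernel's list/RCU code: it models vmd_irq() walking the IRQ list with a plain circular list, and every name in it is made up for the example. Once the removed node is re-initialized to point to itself, a walker standing on that node can never reach the list head again:

  /*
   * Standalone sketch (not the kernel code): models a reader walking the
   * IRQ list while the old disable path deletes and re-initializes the
   * node the reader is currently on.
   */
  #include <stdio.h>

  struct list_head {
          struct list_head *next, *prev;
  };

  static void list_init(struct list_head *h)
  {
          h->next = h->prev = h;
  }

  static void list_add_tail(struct list_head *new, struct list_head *head)
  {
          new->prev = head->prev;
          new->next = head;
          head->prev->next = new;
          head->prev = new;
  }

  int main(void)
  {
          struct list_head head, a, b;
          struct list_head *cursor;
          int i;

          list_init(&head);
          list_add_tail(&a, &head);
          list_add_tail(&b, &head);

          /* A reader is mid-walk, currently positioned on node 'a'. */
          cursor = &a;

          /* Old disable path: unlink 'a' from the list ... */
          a.prev->next = a.next;
          a.next->prev = a.prev;
          /* ... then re-initialize it, so a.next now points back at 'a'. */
          list_init(&a);

          /* The reader's exit test "cursor != &head" can never succeed. */
          for (i = 0; i < 5; i++) {
                  printf("cursor = %p (head = %p)\n",
                         (void *)cursor, (void *)&head);
                  cursor = cursor->next;   /* stuck on 'a' forever */
          }
          return 0;
  }

Plain deletion leaves the removed node's forward pointer intact, so a concurrent walker still finds its way back to the head; it is the re-initialization that strands it. That is why the fix below drops the re-init and instead tracks whether the IRQ is enabled.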

The list initialization was trying to fix a bug from multiple calls to
disable the same IRQ.  Fix this instead by having the VMD driver track if
the interrupt is enabled.

[bhelgaas: changelog, add "Fixes"]
Fixes: 97e92306 ("x86/PCI: VMD: Initialize list item in IRQ disable")
Reported-by: Grzegorz Koczot <grzegorz.koczot@intel.com>
Tested-by: Miroslaw Drost <miroslaw.drost@intel.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Jon Derrick <jonathan.derrick@intel.com>
parent 5d0bdf28
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  * @node:    list item for parent traversal.
  * @rcu:     RCU callback item for freeing.
  * @irq:     back pointer to parent.
+ * @enabled: true if driver enabled IRQ
  * @virq:    the virtual IRQ value provided to the requesting driver.
  *
  * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
         struct list_head        node;
         struct rcu_head         rcu;
         struct vmd_irq_list     *irq;
+        bool                    enabled;
         unsigned int            virq;
 };
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
         unsigned long flags;
 
         raw_spin_lock_irqsave(&list_lock, flags);
+        WARN_ON(vmdirq->enabled);
         list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+        vmdirq->enabled = true;
         raw_spin_unlock_irqrestore(&list_lock, flags);
 
         data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
         data->chip->irq_mask(data);
 
         raw_spin_lock_irqsave(&list_lock, flags);
-        list_del_rcu(&vmdirq->node);
-        INIT_LIST_HEAD_RCU(&vmdirq->node);
+        if (vmdirq->enabled) {
+                list_del_rcu(&vmdirq->node);
+                vmdirq->enabled = false;
+        }
         raw_spin_unlock_irqrestore(&list_lock, flags);
 }