Commit 0b0f9dc5 authored by Michael S. Tsirkin's avatar Michael S. Tsirkin

Revert "virtio_pci: use shared interrupts for virtqueues"

This reverts commit 07ec5148.

Conflicts:
	drivers/virtio/virtio_pci_common.c

Unfortunately the idea does not work with threadirqs,
as more than 32 queues can then map to a single interrupt.

Further, the cleanup seems to be one of the changes that broke
hibernation for some users. We are still not sure why,
but reverting helps.

This reverts the cleanup changes but keeps the affinity support.
Tested-by: default avatarMike Galbraith <efault@gmx.de>
Signed-off-by: default avatarMichael S. Tsirkin <mst@redhat.com>
parent 2008c154
...@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev) ...@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i; int i;
synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); if (vp_dev->intx_enabled)
for (i = 1; i < vp_dev->msix_vectors; i++) synchronize_irq(vp_dev->pci_dev->irq);
for (i = 0; i < vp_dev->msix_vectors; ++i)
synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
} }
...@@ -97,10 +99,79 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) ...@@ -97,10 +99,79 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return vp_vring_interrupt(irq, opaque); return vp_vring_interrupt(irq, opaque);
} }
static void vp_remove_vqs(struct virtio_device *vdev) static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
bool per_vq_vectors, struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
unsigned i, v;
int err = -ENOMEM;
vp_dev->msix_vectors = nvectors;
vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
GFP_KERNEL);
if (!vp_dev->msix_names)
goto error;
vp_dev->msix_affinity_masks
= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
GFP_KERNEL);
if (!vp_dev->msix_affinity_masks)
goto error;
for (i = 0; i < nvectors; ++i)
if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
GFP_KERNEL))
goto error;
err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
nvectors, PCI_IRQ_MSIX |
(desc ? PCI_IRQ_AFFINITY : 0),
desc);
if (err < 0)
goto error;
vp_dev->msix_enabled = 1;
/* Set the vector used for configuration */
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_config_changed, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
goto error;
++vp_dev->msix_used_vectors;
v = vp_dev->config_vector(vp_dev, v);
/* Verify we had enough resources to assign the vector */
if (v == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto error;
}
if (!per_vq_vectors) {
/* Shared vector for all VQs */
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
goto error;
++vp_dev->msix_used_vectors;
}
return 0;
error:
return err;
}
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{ {
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq, *n; struct virtqueue *vq, *n;
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) { list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
if (vp_dev->msix_vector_map) { if (vp_dev->msix_vector_map) {
...@@ -112,33 +183,35 @@ static void vp_remove_vqs(struct virtio_device *vdev) ...@@ -112,33 +183,35 @@ static void vp_remove_vqs(struct virtio_device *vdev)
} }
vp_dev->del_vq(vq); vp_dev->del_vq(vq);
} }
}
/* the config->del_vqs() implementation */ if (vp_dev->intx_enabled) {
void vp_del_vqs(struct virtio_device *vdev) free_irq(vp_dev->pci_dev->irq, vp_dev);
{ vp_dev->intx_enabled = 0;
struct virtio_pci_device *vp_dev = to_vp_device(vdev); }
int i;
if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
return;
vp_remove_vqs(vdev); for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
if (vp_dev->msix_enabled) { for (i = 0; i < vp_dev->msix_vectors; i++)
for (i = 0; i < vp_dev->msix_vectors; i++) if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]); free_cpumask_var(vp_dev->msix_affinity_masks[i]);
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */ /* Disable the vector used for configuration */
vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
kfree(vp_dev->msix_affinity_masks); pci_free_irq_vectors(vp_dev->pci_dev);
kfree(vp_dev->msix_names); vp_dev->msix_enabled = 0;
kfree(vp_dev->msix_vector_map);
} }
free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); vp_dev->msix_vectors = 0;
pci_free_irq_vectors(vp_dev->pci_dev); vp_dev->msix_used_vectors = 0;
kfree(vp_dev->msix_names);
vp_dev->msix_names = NULL;
kfree(vp_dev->msix_affinity_masks);
vp_dev->msix_affinity_masks = NULL;
kfree(vp_dev->msix_vector_map);
vp_dev->msix_vector_map = NULL;
} }
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
...@@ -147,128 +220,80 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, ...@@ -147,128 +220,80 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc) struct irq_affinity *desc)
{ {
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
int i, err = -ENOMEM, allocated_vectors, nvectors;
unsigned flags = PCI_IRQ_MSIX;
u16 msix_vec; u16 msix_vec;
int i, err, nvectors, allocated_vectors;
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
}
nvectors = 1;
for (i = 0; i < nvqs; i++)
if (callbacks[i])
nvectors++;
if (per_vq_vectors) { if (per_vq_vectors) {
err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, /* Best option: one for change interrupt, one per vq. */
nvectors, flags, desc); nvectors = 1;
for (i = 0; i < nvqs; ++i)
if (callbacks[i])
++nvectors;
} else { } else {
err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, /* Second best: one for change, shared for all vqs. */
PCI_IRQ_MSIX); nvectors = 2;
}
if (err < 0)
return err;
vp_dev->msix_vectors = nvectors;
vp_dev->msix_names = kmalloc_array(nvectors,
sizeof(*vp_dev->msix_names), GFP_KERNEL);
if (!vp_dev->msix_names)
goto out_free_irq_vectors;
vp_dev->msix_affinity_masks = kcalloc(nvectors,
sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
if (!vp_dev->msix_affinity_masks)
goto out_free_msix_names;
for (i = 0; i < nvectors; ++i) {
if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
GFP_KERNEL))
goto out_free_msix_affinity_masks;
} }
/* Set the vector used for configuration */ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), per_vq_vectors ? desc : NULL);
"%s-config", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
0, vp_dev->msix_names[0], vp_dev);
if (err) if (err)
goto out_free_irq_vectors; goto error_find;
/* Verify we had enough resources to assign the vector */ if (per_vq_vectors) {
if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { vp_dev->msix_vector_map = kmalloc_array(nvqs,
err = -EBUSY; sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
goto out_free_config_irq; if (!vp_dev->msix_vector_map)
goto error_find;
} }
vp_dev->msix_vector_map = kmalloc_array(nvqs, allocated_vectors = vp_dev->msix_used_vectors;
sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
if (!vp_dev->msix_vector_map)
goto out_disable_config_irq;
allocated_vectors = 1; /* vector 0 is the config interrupt */
for (i = 0; i < nvqs; ++i) { for (i = 0; i < nvqs; ++i) {
if (!names[i]) { if (!names[i]) {
vqs[i] = NULL; vqs[i] = NULL;
continue; continue;
} }
if (callbacks[i]) if (!callbacks[i])
msix_vec = allocated_vectors;
else
msix_vec = VIRTIO_MSI_NO_VECTOR; msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (per_vq_vectors)
msix_vec = allocated_vectors++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
msix_vec); msix_vec);
if (IS_ERR(vqs[i])) { if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]); err = PTR_ERR(vqs[i]);
goto out_remove_vqs; goto error_find;
} }
if (!per_vq_vectors)
continue;
if (msix_vec == VIRTIO_MSI_NO_VECTOR) { if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
continue; continue;
} }
snprintf(vp_dev->msix_names[i + 1], /* allocate per-vq irq if available and necessary */
sizeof(*vp_dev->msix_names), "%s-%s", snprintf(vp_dev->msix_names[msix_vec],
sizeof *vp_dev->msix_names,
"%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]); dev_name(&vp_dev->vdev.dev), names[i]);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, IRQF_SHARED, vring_interrupt, 0,
vp_dev->msix_names[i + 1], vqs[i]); vp_dev->msix_names[msix_vec],
vqs[i]);
if (err) { if (err) {
/* don't free this irq on error */ /* don't free this irq on error */
vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
goto out_remove_vqs; goto error_find;
} }
vp_dev->msix_vector_map[i] = msix_vec; vp_dev->msix_vector_map[i] = msix_vec;
if (per_vq_vectors)
allocated_vectors++;
} }
vp_dev->msix_enabled = 1;
return 0; return 0;
out_remove_vqs: error_find:
vp_remove_vqs(vdev); vp_del_vqs(vdev);
kfree(vp_dev->msix_vector_map);
out_disable_config_irq:
vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
out_free_config_irq:
free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
out_free_msix_affinity_masks:
for (i = 0; i < nvectors; i++) {
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
kfree(vp_dev->msix_affinity_masks);
out_free_msix_names:
kfree(vp_dev->msix_names);
out_free_irq_vectors:
pci_free_irq_vectors(vp_dev->pci_dev);
return err; return err;
} }
...@@ -282,8 +307,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, ...@@ -282,8 +307,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev); dev_name(&vdev->dev), vp_dev);
if (err) if (err)
return err; goto out_del_vqs;
vp_dev->intx_enabled = 1;
for (i = 0; i < nvqs; ++i) { for (i = 0; i < nvqs; ++i) {
if (!names[i]) { if (!names[i]) {
vqs[i] = NULL; vqs[i] = NULL;
...@@ -293,15 +319,13 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, ...@@ -293,15 +319,13 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
VIRTIO_MSI_NO_VECTOR); VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) { if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]); err = PTR_ERR(vqs[i]);
goto out_remove_vqs; goto out_del_vqs;
} }
} }
return 0; return 0;
out_del_vqs:
out_remove_vqs: vp_del_vqs(vdev);
vp_remove_vqs(vdev);
free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
return err; return err;
} }
......
...@@ -66,12 +66,16 @@ struct virtio_pci_device { ...@@ -66,12 +66,16 @@ struct virtio_pci_device {
/* MSI-X support */ /* MSI-X support */
int msix_enabled; int msix_enabled;
int intx_enabled;
cpumask_var_t *msix_affinity_masks; cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough, /* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */ * and I'm too lazy to allocate each name separately. */
char (*msix_names)[256]; char (*msix_names)[256];
/* Total Number of MSI-X vectors (including per-VQ ones). */ /* Number of available vectors */
int msix_vectors; unsigned msix_vectors;
/* Vectors allocated, excluding per-vq vectors if any */
unsigned msix_used_vectors;
/* Map of per-VQ MSI-X vectors, may be NULL */ /* Map of per-VQ MSI-X vectors, may be NULL */
unsigned *msix_vector_map; unsigned *msix_vector_map;
...@@ -85,6 +89,14 @@ struct virtio_pci_device { ...@@ -85,6 +89,14 @@ struct virtio_pci_device {
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
}; };
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
* virtqueues Thus, we need at least 2 vectors for MSI. */
enum {
VP_MSIX_CONFIG_VECTOR = 0,
VP_MSIX_VQ_VECTOR = 1,
};
/* Convert a generic virtio device to our structure */ /* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment