Commit abe71eb3 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "Fixes all over the place, most notably fixes for latent bugs in
  drivers that got exposed by suppressing interrupts before DRIVER_OK,
  which in turn has been done by 8b4ec69d ("virtio: harden vring
  IRQ")"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  um: virt-pci: set device ready in probe()
  vdpa: make get_vq_group and set_group_asid optional
  virtio: Fix all occurences of the "the the" typo
  vduse: Fix NULL pointer dereference on sysfs access
  vringh: Fix loop descriptors check in the indirect cases
  vdpa/mlx5: clean up indenting in handle_ctrl_vlan()
  vdpa/mlx5: fix error code for deleting vlan
  virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed
  vdpa/mlx5: Fix syntax errors in comments
  virtio-rng: make device ready before making request
parents 0678afa6 eacea844
@@ -544,6 +544,8 @@ static int um_pci_init_vqs(struct um_pci_device *dev)
 	dev->cmd_vq = vqs[0];
 	dev->irq_vq = vqs[1];
 
+	virtio_device_ready(dev->vdev);
+
 	for (i = 0; i < NUM_IRQ_MSGS; i++) {
 		void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
@@ -587,7 +589,7 @@ static int um_pci_virtio_probe(struct virtio_device *vdev)
 	dev->irq = irq_alloc_desc(numa_node_id());
 	if (dev->irq < 0) {
 		err = dev->irq;
-		goto error;
+		goto err_reset;
 	}
 	um_pci_devices[free].dev = dev;
 	vdev->priv = dev;
@@ -604,6 +606,9 @@ static int um_pci_virtio_probe(struct virtio_device *vdev)
 	um_pci_rescan();
 	return 0;
 
+err_reset:
+	virtio_reset_device(vdev);
+	vdev->config->del_vqs(vdev);
 error:
 	mutex_unlock(&um_pci_mtx);
 	kfree(dev);
......
@@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev)
 		goto err_find;
 	}
 
+	virtio_device_ready(vdev);
+
 	/* we always have a pending entropy request */
 	request_entropy(vi);
......
@@ -107,7 +107,7 @@ struct mlx5_vdpa_virtqueue {
 /* Resources for implementing the notification channel from the device
  * to the driver. fwqp is the firmware end of an RC connection; the
- * other end is vqqp used by the driver. cq is is where completions are
+ * other end is vqqp used by the driver. cq is where completions are
  * reported.
  */
 struct mlx5_vdpa_cq cq;
@@ -1814,12 +1814,13 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 		id = mlx5vdpa16_to_cpu(mvdev, vlan);
 		mac_vlan_del(ndev, ndev->config.mac, id, true);
+		status = VIRTIO_NET_OK;
 		break;
 	default:
 		break;
 	}
 
 	return status;
 }
 
 static void mlx5_cvq_kick_handler(struct work_struct *work)
......
...@@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config, ...@@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config,
dev->minor = ret; dev->minor = ret;
dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT; dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
dev->dev = device_create(vduse_class, NULL, dev->dev = device_create_with_groups(vduse_class, NULL,
MKDEV(MAJOR(vduse_major), dev->minor), MKDEV(MAJOR(vduse_major), dev->minor),
dev, "%s", config->name); dev, vduse_dev_groups, "%s", config->name);
if (IS_ERR(dev->dev)) { if (IS_ERR(dev->dev)) {
ret = PTR_ERR(dev->dev); ret = PTR_ERR(dev->dev);
goto err_dev; goto err_dev;
@@ -1596,7 +1596,6 @@ static int vduse_init(void)
 		return PTR_ERR(vduse_class);
 
 	vduse_class->devnode = vduse_devnode;
-	vduse_class->dev_groups = vduse_dev_groups;
 
 	ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
 	if (ret)
......
@@ -499,6 +499,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 		ops->set_vq_ready(vdpa, idx, s.num);
 		return 0;
 	case VHOST_VDPA_GET_VRING_GROUP:
+		if (!ops->get_vq_group)
+			return -EOPNOTSUPP;
 		s.index = idx;
 		s.num = ops->get_vq_group(vdpa, idx);
 		if (s.num >= vdpa->ngroups)
......
@@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
 	     int (*copy)(const struct vringh *vrh,
 			 void *dst, const void *src, size_t len))
 {
-	int err, count = 0, up_next, desc_max;
+	int err, count = 0, indirect_count = 0, up_next, desc_max;
 	struct vring_desc desc, *descs;
 	struct vringh_range range = { -1ULL, 0 }, slowrange;
 	bool slow = false;
@@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i,
 			continue;
 		}
 
-		if (count++ == vrh->vring.num) {
+		if (up_next == -1)
+			count++;
+		else
+			indirect_count++;
+
+		if (count > vrh->vring.num || indirect_count > desc_max) {
 			vringh_bad("Descriptor loop in %p", descs);
 			err = -ELOOP;
 			goto fail;
@@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
 				i = return_from_indirect(vrh, &up_next,
 							 &descs, &desc_max);
 				slow = false;
+				indirect_count = 0;
 			} else
 				break;
 		}
......
@@ -255,7 +255,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
 	/*
 	 * Per memory-barriers.txt, wmb() is not needed to guarantee
-	 * that the the cache coherent memory writes have completed
+	 * that the cache coherent memory writes have completed
 	 * before writing to the MMIO region.
 	 */
 	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
@@ -701,6 +701,7 @@ static int vm_cmdline_set(const char *device,
 	if (!vm_cmdline_parent_registered) {
 		err = device_register(&vm_cmdline_parent);
 		if (err) {
+			put_device(&vm_cmdline_parent);
 			pr_err("Failed to register parent device!\n");
 			return err;
 		}
......
@@ -469,7 +469,7 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
 	/*
 	 * Per memory-barriers.txt, wmb() is not needed to guarantee
-	 * that the the cache coherent memory writes have completed
+	 * that the cache coherent memory writes have completed
 	 * before writing to the MMIO region.
 	 */
 	vp_iowrite8(status, &cfg->device_status);
......
@@ -178,7 +178,8 @@ struct vdpa_map_file {
  *				for the device
  *				@vdev: vdpa device
  *				Returns virtqueue algin requirement
- * @get_vq_group:		Get the group id for a specific virtqueue
+ * @get_vq_group:		Get the group id for a specific
+ *				virtqueue (optional)
  *				@vdev: vdpa device
  *				@idx: virtqueue index
  *				Returns u32: group id for this virtqueue
@@ -243,7 +244,7 @@ struct vdpa_map_file {
  *				Returns the iova range supported by
  *				the device.
  * @set_group_asid:		Set address space identifier for a
- *				virtqueue group
+ *				virtqueue group (optional)
  *				@vdev: vdpa device
  *				@group: virtqueue group
  *				@asid: address space id for this group
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment