Commit 9e69efd4 authored by David S. Miller

Merge branch 'vhost-fixes'

Jason Wang says:

====================
Fix various issues of vhost

This series tries to fix various issues of vhost:

- Patch 1 adds a missing write barrier between the used idx update and
  the log write.
- Patches 2-3 bring back the protection of the device IOTLB through the
  vq mutexes; this fixes a possible use-after-free of device IOTLB
  entries.

Please consider them for -stable.

Changes from V2:
- drop the dirty page fix and move it to net-next
Changes from V1:
- silence a compiler warning on 32-bit.
- use mutex_trylock() on the slow path instead of mutex_lock() even on
  the fast path.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3b076cfe 86a07da3
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -513,7 +513,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
 	struct socket *sock;
 	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
 
-	mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+	/* Try to hold the vq mutex of the paired virtqueue. We can't
+	 * use mutex_lock() here since we could not guarantee a
+	 * consistent lock ordering.
+	 */
+	if (!mutex_trylock(&vq->mutex))
+		return;
 
 	vhost_disable_notify(&net->dev, vq);
 	sock = rvq->private_data;
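The hunk above drops the unconditional mutex_lock_nested() because the busy-poll path already holds one virtqueue mutex and then wants the paired one, and the pair can be taken in either order depending on poll_rx; two such paths locking in opposite orders can ABBA-deadlock, whereas a trylock that simply gives up cannot. A minimal user-space sketch of that idea, assuming hypothetical names (rx_lock, tx_lock, busy_poll) and pthread mutexes standing in for the vq mutexes:

/* Illustrative user-space analogue, not vhost code: two threads each hold
 * one queue lock and opportunistically want the paired one.  Taking the
 * second lock with pthread_mutex_lock() in both orders can deadlock
 * (ABBA); pthread_mutex_trylock() lets the busy-poll side just bail out.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Busy-poll style path: already holds @held, tries to grab @paired. */
static void busy_poll(pthread_mutex_t *held, pthread_mutex_t *paired,
		      const char *name)
{
	pthread_mutex_lock(held);
	if (pthread_mutex_trylock(paired) == 0) {	/* never blocks */
		printf("%s: polled both queues\n", name);
		pthread_mutex_unlock(paired);
	} else {
		printf("%s: paired queue busy, skip this round\n", name);
	}
	pthread_mutex_unlock(held);
}

static void *rx_thread(void *arg)
{
	(void)arg;
	busy_poll(&rx_lock, &tx_lock, "rx");
	return NULL;
}

static void *tx_thread(void *arg)
{
	(void)arg;
	/* Opposite order: would ABBA-deadlock with an unconditional lock. */
	busy_poll(&tx_lock, &rx_lock, "tx");
	return NULL;
}

int main(void)
{
	pthread_t rx, tx;

	pthread_create(&rx, NULL, rx_thread, NULL);
	pthread_create(&tx, NULL, tx_thread, NULL);
	pthread_join(rx, NULL);
	pthread_join(tx, NULL);
	return 0;
}

With pthread_mutex_lock() in busy_poll() the two threads could block on each other forever; with trylock the worst case is a skipped polling round, which matches what the patch accepts for the busy-poll fast path.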
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
 	int i;
 
-	for (i = 0; i < d->nvqs; ++i) {
-		mutex_lock(&d->vqs[i]->mutex);
+	for (i = 0; i < d->nvqs; ++i)
 		__vhost_vq_meta_reset(d->vqs[i]);
-		mutex_unlock(&d->vqs[i]->mutex);
-	}
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+	int i = 0;
+	for (i = 0; i < d->nvqs; ++i)
+		mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+	int i = 0;
+	for (i = 0; i < d->nvqs; ++i)
+		mutex_unlock(&d->vqs[i]->mutex);
+}
+
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
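The reinstated helpers take every virtqueue mutex in ascending index order; the second argument to mutex_lock_nested() is only a lockdep subclass annotation so that holding several vq mutexes at once does not trip the lock validator. A rough user-space analogue of the fixed-order acquisition, assuming hypothetical names (NVQS, vq_lock, lock_all_vqs) and pthread mutexes:

/* Hypothetical sketch: acquire a whole array of locks in one fixed
 * (ascending-index) order, so any two paths that both need "all the
 * locks" can never deadlock against each other.
 */
#include <pthread.h>

#define NVQS 4		/* hypothetical queue count */

static pthread_mutex_t vq_lock[NVQS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void lock_all_vqs(void)
{
	int i;

	/* Always lowest index first. */
	for (i = 0; i < NVQS; ++i)
		pthread_mutex_lock(&vq_lock[i]);
}

static void unlock_all_vqs(void)
{
	int i;

	for (i = 0; i < NVQS; ++i)
		pthread_mutex_unlock(&vq_lock[i]);
}

int main(void)
{
	lock_all_vqs();
	/* ... update state shared with every queue here ... */
	unlock_all_vqs();
	return 0;
}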
@@ -976,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	int ret = 0;
 
 	mutex_lock(&dev->mutex);
+	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
 		if (!dev->iotlb) {
@@ -1009,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 		break;
 	}
 
+	vhost_dev_unlock_vqs(dev);
 	mutex_unlock(&dev->mutex);
 
 	return ret;
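With the two calls above, every IOTLB update or invalidation runs with all virtqueue mutexes held, while any path that dereferences cached IOTLB entries does so under its own vq mutex; an entry therefore cannot be freed while a virtqueue is still using it, which is the use-after-free the cover letter refers to. A simplified user-space sketch of that reader/updater protocol, assuming hypothetical names (xlate_entry, translate, invalidate) and pthread mutexes in place of the vq mutexes:

/* Hypothetical sketch, not vhost code: readers dereference a shared
 * translation entry only under their own lock; the updater holds every
 * reader lock while replacing/freeing the entry, so a freed entry can
 * never still be in use.
 */
#include <pthread.h>
#include <stdlib.h>

struct xlate_entry {
	unsigned long start, size, addr;
};

#define NVQS 2

static pthread_mutex_t vq_lock[NVQS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static struct xlate_entry *entry;	/* shared IOTLB-like state */

static unsigned long translate(int vq)	/* reader: one virtqueue */
{
	unsigned long addr = 0;

	pthread_mutex_lock(&vq_lock[vq]);
	if (entry)			/* safe: updater cannot free it here */
		addr = entry->addr;
	pthread_mutex_unlock(&vq_lock[vq]);
	return addr;
}

static void invalidate(void)		/* updater: IOTLB message path */
{
	int i;

	for (i = 0; i < NVQS; ++i)	/* like vhost_dev_lock_vqs() */
		pthread_mutex_lock(&vq_lock[i]);
	free(entry);			/* no reader can hold a pointer now */
	entry = NULL;
	for (i = 0; i < NVQS; ++i)	/* like vhost_dev_unlock_vqs() */
		pthread_mutex_unlock(&vq_lock[i]);
}

int main(void)
{
	invalidate();
	return (int)translate(0);
}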
@@ -2220,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 		return -EFAULT;
 	}
 	if (unlikely(vq->log_used)) {
+		/* Make sure used idx is seen before log. */
+		smp_wmb();
 		/* Log used index update. */
 		log_write(vq->log_base,
 			  vq->log_addr + offsetof(struct vring_used, idx),
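The smp_wmb() added above orders the earlier store of the used index against the subsequent write to the dirty log: without it the log update could become visible first, and a consumer of the log (for example the live-migration path) could copy the used ring before the index update lands and read a stale used idx. A user-space analogue of the same ordering requirement, using C11 atomics and hypothetical names (used_idx, log_word), with atomic_thread_fence(memory_order_release) standing in for smp_wmb():

/* Illustrative only: the "used idx" store must be visible to another
 * observer no later than the "dirty log" store.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint used_idx;	/* stands in for vring_used->idx  */
static atomic_uint log_word;	/* stands in for the dirty-page log */

static void producer(unsigned int new_idx)
{
	atomic_store_explicit(&used_idx, new_idx, memory_order_relaxed);

	/* Make sure used idx is seen before log (smp_wmb() in the patch). */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&log_word, 1, memory_order_relaxed);
}

static void consumer(void)
{
	/* Pairs with the release fence: once the log write is observed,
	 * the used idx store is guaranteed to be observed as well.
	 */
	if (atomic_load_explicit(&log_word, memory_order_acquire))
		printf("used idx is at least %u\n",
		       atomic_load_explicit(&used_idx, memory_order_relaxed));
}

int main(void)
{
	producer(42);
	consumer();
	return 0;
}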