Commit 78139c94 authored by Tonghao Zhang, committed by David S. Miller

net: vhost: lock the vqs one by one

This patch changes the locking scheme: instead of locking
all the vqs at the same time, lock them one by one. This
will be used by the next patch to avoid a deadlock. (A
sketch of both locking patterns follows the tags below.)
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent af4325ec
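To make the change concrete, below is a minimal userspace sketch contrasting the two locking schemes. It uses pthread mutexes and simplified stand-in types (struct dev, struct vq, vq_meta_reset) rather than the real vhost structures, so treat it as an illustration of the pattern, not the kernel code itself.

#include <pthread.h>
#include <stdio.h>

#define NVQS 2

struct vq {
	pthread_mutex_t mutex;          /* stands in for vhost_virtqueue->mutex */
};

struct dev {
	int nvqs;
	struct vq *vqs[NVQS];           /* stands in for vhost_dev->vqs */
};

static void vq_meta_reset(struct vq *vq)
{
	(void)vq;                       /* placeholder for __vhost_vq_meta_reset() */
}

/* Old pattern: take every vq mutex up front, release them all at the end. */
static void dev_lock_all_vqs(struct dev *d)
{
	for (int i = 0; i < d->nvqs; i++)
		pthread_mutex_lock(&d->vqs[i]->mutex);
}

static void dev_unlock_all_vqs(struct dev *d)
{
	for (int i = 0; i < d->nvqs; i++)
		pthread_mutex_unlock(&d->vqs[i]->mutex);
}

/* New pattern: lock each vq only around the operation that touches it. */
static void dev_meta_reset_one_by_one(struct dev *d)
{
	for (int i = 0; i < d->nvqs; i++) {
		pthread_mutex_lock(&d->vqs[i]->mutex);
		vq_meta_reset(d->vqs[i]);
		pthread_mutex_unlock(&d->vqs[i]->mutex);
	}
}

int main(void)
{
	struct vq vqs[NVQS];
	struct dev d = { .nvqs = NVQS };

	for (int i = 0; i < NVQS; i++) {
		pthread_mutex_init(&vqs[i].mutex, NULL);
		d.vqs[i] = &vqs[i];
	}

	/* Old scheme: all vq locks held across the whole operation. */
	dev_lock_all_vqs(&d);
	for (int i = 0; i < d.nvqs; i++)
		vq_meta_reset(d.vqs[i]);
	dev_unlock_all_vqs(&d);

	/* New scheme: one short critical section per vq. */
	dev_meta_reset_one_by_one(&d);

	printf("both locking patterns completed\n");
	return 0;
}

The per-vq scheme keeps each critical section short and drops the ordered mutex_lock_nested() sequence, which is what the follow-up patch relies on to avoid the deadlock.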
drivers/vhost/vhost.c

@@ -294,8 +294,11 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
 	int i;
 
-	for (i = 0; i < d->nvqs; ++i)
+	for (i = 0; i < d->nvqs; ++i) {
+		mutex_lock(&d->vqs[i]->mutex);
 		__vhost_vq_meta_reset(d->vqs[i]);
+		mutex_unlock(&d->vqs[i]->mutex);
+	}
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -891,20 +894,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
-static void vhost_dev_lock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_lock_nested(&d->vqs[i]->mutex, i);
-}
-
-static void vhost_dev_unlock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_unlock(&d->vqs[i]->mutex);
-}
-
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
@@ -954,7 +943,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 		if (msg->iova <= vq_msg->iova &&
 		    msg->iova + msg->size - 1 >= vq_msg->iova &&
 		    vq_msg->type == VHOST_IOTLB_MISS) {
+			mutex_lock(&node->vq->mutex);
 			vhost_poll_queue(&node->vq->poll);
+			mutex_unlock(&node->vq->mutex);
+
 			list_del(&node->node);
 			kfree(node);
 		}
@@ -986,7 +978,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	int ret = 0;
 
 	mutex_lock(&dev->mutex);
-	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
 		if (!dev->iotlb) {
@@ -1020,7 +1011,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 		break;
 	}
 
-	vhost_dev_unlock_vqs(dev);
 	mutex_unlock(&dev->mutex);
 
 	return ret;