Commit 22fa90c7 authored by Asias He, committed by Michael S. Tsirkin

vhost: Remove custom vhost rcu usage

Now, vq->private_data is always accessed under vq mutex. No need to play
the vhost rcu trick.
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent e7802212
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -749,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n, ...@@ -749,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_poll *poll = n->poll + (nvq - n->vqs); struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock; struct socket *sock;
sock = rcu_dereference_protected(vq->private_data, sock = vq->private_data;
lockdep_is_held(&vq->mutex));
if (!sock) if (!sock)
return 0; return 0;
...@@ -763,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, ...@@ -763,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct socket *sock; struct socket *sock;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
sock = rcu_dereference_protected(vq->private_data, sock = vq->private_data;
lockdep_is_held(&vq->mutex));
vhost_net_disable_vq(n, vq); vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, NULL); vq->private_data = NULL;
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
return sock; return sock;
} }
...@@ -922,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -922,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
} }
/* start polling new socket */ /* start polling new socket */
oldsock = rcu_dereference_protected(vq->private_data, oldsock = vq->private_data;
lockdep_is_held(&vq->mutex));
if (sock != oldsock) { if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq, ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock)); sock && vhost_sock_zcopy(sock));
...@@ -933,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -933,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
} }
vhost_net_disable_vq(n, vq); vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, sock); vq->private_data = sock;
r = vhost_init_used(vq); r = vhost_init_used(vq);
if (r) if (r)
goto err_used; goto err_used;
...@@ -967,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -967,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
return 0; return 0;
err_used: err_used:
rcu_assign_pointer(vq->private_data, oldsock); vq->private_data = oldsock;
vhost_net_enable_vq(n, vq); vhost_net_enable_vq(n, vq);
if (ubufs) if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs); vhost_net_ubuf_put_wait_and_free(ubufs);
......
...@@ -1223,9 +1223,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ...@@ -1223,9 +1223,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
sizeof(vs->vs_vhost_wwpn)); sizeof(vs->vs_vhost_wwpn));
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq; vq = &vs->vqs[i].vq;
/* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
rcu_assign_pointer(vq->private_data, vs_tpg); vq->private_data = vs_tpg;
vhost_init_used(vq); vhost_init_used(vq);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
...@@ -1304,9 +1303,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, ...@@ -1304,9 +1303,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
if (match) { if (match) {
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq; vq = &vs->vqs[i].vq;
/* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
rcu_assign_pointer(vq->private_data, NULL); vq->private_data = NULL;
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
} }
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test) ...@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
priv = test ? n : NULL; priv = test ? n : NULL;
/* start polling new socket */ /* start polling new socket */
oldpriv = rcu_dereference_protected(vq->private_data, oldpriv = vq->private_data;
lockdep_is_held(&vq->mutex)); vq->private_data = priv;
rcu_assign_pointer(vq->private_data, priv);
r = vhost_init_used(&n->vqs[index]); r = vhost_init_used(&n->vqs[index]);
......
...@@ -103,14 +103,8 @@ struct vhost_virtqueue { ...@@ -103,14 +103,8 @@ struct vhost_virtqueue {
struct iovec iov[UIO_MAXIOV]; struct iovec iov[UIO_MAXIOV];
struct iovec *indirect; struct iovec *indirect;
struct vring_used_elem *heads; struct vring_used_elem *heads;
/* We use a kind of RCU to access private pointer. /* Protected by virtqueue mutex. */
* All readers access it from worker, which makes it possible to void *private_data;
* flush the vhost_work instead of synchronize_rcu. Therefore readers do
* not need to call rcu_read_lock/rcu_read_unlock: the beginning of
* vhost_work execution acts instead of rcu_read_lock() and the end of
* vhost_work execution acts instead of rcu_read_unlock().
* Writers use virtqueue mutex. */
void __rcu *private_data;
/* Log write descriptors */ /* Log write descriptors */
void __user *log_base; void __user *log_base;
struct vhost_log *log; struct vhost_log *log;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment