Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 22fa90c7 authored by Asias He, committed by Michael S. Tsirkin
Browse files

vhost: Remove custom vhost rcu usage



Now, vq->private_data is always accessed under vq mutex. No need to play
the vhost rcu trick.

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent e7802212
Loading
Loading
Loading
Loading
+6 −10
Original line number Diff line number Diff line
@@ -15,7 +15,6 @@
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

@@ -749,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	sock = vq->private_data;
	if (!sock)
		return 0;

@@ -763,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}
@@ -922,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
@@ -933,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
		}

		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vq->private_data = sock;
		r = vhost_init_used(vq);
		if (r)
			goto err_used;
@@ -967,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
	return 0;

err_used:
	rcu_assign_pointer(vq->private_data, oldsock);
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
+2 −4
Original line number Diff line number Diff line
@@ -1223,9 +1223,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, vs_tpg);
			vq->private_data = vs_tpg;
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
@@ -1304,9 +1303,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, NULL);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
+2 −4
Original line number Diff line number Diff line
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
		priv = test ? n : NULL;

		/* start polling new socket */
		oldpriv = rcu_dereference_protected(vq->private_data,
						    lockdep_is_held(&vq->mutex));
		rcu_assign_pointer(vq->private_data, priv);
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_init_used(&n->vqs[index]);

+2 −8
Original line number Diff line number Diff line
@@ -103,14 +103,8 @@ struct vhost_virtqueue {
	struct iovec iov[UIO_MAXIOV];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from worker, which makes it possible to
	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * vhost_work execution acts instead of rcu_read_lock() and the end of
	 * vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use virtqueue mutex. */
	void __rcu *private_data;
	/* Protected by virtqueue mutex. */
	void *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;