
Commit e1d6fbc3 authored by David S. Miller

virtio_net: fix race in RX VQ processing



Michael S. Tsirkin says:

====================
Jason Wang reported a race in RX VQ processing:
virtqueue_enable_cb is called outside the napi lock,
violating virtio serialization rules.
The race has been there from day 1, but it got especially nasty in 3.0
when commit a5c262c5
"virtio_ring: support event idx feature"
added more dependency on vq state.

Please review, and consider for 3.11 and stable.

Changes from v1:
	- Added Jason's Tested-by tag
	- minor coding style fix
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 01276ed2 cbdadbbf
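
For context before the diffs: in the old code, virtqueue_enable_cb() both re-enabled callbacks and checked for missed buffers, and it ran after napi_complete(), i.e. at a point where another CPU could already be processing the same virtqueue. The fix splits the call so that the state-modifying half stays inside the serialized region. A condensed restatement of the old and new tails of virtnet_poll() (abridged from the diffs below, not complete driver code):

/* Old (racy): virtqueue_enable_cb() reads and writes virtqueue state
 * after napi_complete(), outside the region NAPI serializes. */
napi_complete(napi);
if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
    napi_schedule_prep(napi)) {
	virtqueue_disable_cb(rq->vq);
	__napi_schedule(napi);
}

/* New: virtqueue_enable_cb_prepare() touches state while still
 * serialized and returns an opaque snapshot; after napi_complete(),
 * the lock-free virtqueue_poll() merely compares that snapshot
 * against the ring to detect buffers that raced in. */
r = virtqueue_enable_cb_prepare(rq->vq);
napi_complete(napi);
if (unlikely(virtqueue_poll(rq->vq, r)) &&
    napi_schedule_prep(napi)) {
	virtqueue_disable_cb(rq->vq);
	__napi_schedule(napi);
}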
drivers/net/virtio_net.c: +3 −2
@@ -602,7 +602,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		container_of(napi, struct receive_queue, napi);
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	void *buf;
-	unsigned int len, received = 0;
+	unsigned int r, len, received = 0;
 
 again:
 	while (received < budget &&
@@ -619,8 +619,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
 	/* Out of packets? */
 	if (received < budget) {
+		r = virtqueue_enable_cb_prepare(rq->vq);
 		napi_complete(napi);
-		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
+		if (unlikely(virtqueue_poll(rq->vq, r)) &&
 		    napi_schedule_prep(napi)) {
 			virtqueue_disable_cb(rq->vq);
 			__napi_schedule(napi);
drivers/virtio/virtio_ring.c: +44 −12
@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @vq: the struct virtqueue we're talking about.
  *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
 
 	START_USE(vq);
 
@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
-	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
-
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
 	END_USE(vq);
-	return true;
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	virtio_mb(vq->weak_barriers);
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
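
The virtio_ring.c half above is what makes the pattern general: any driver can now snapshot queue state under its usual serialization and then test for late-arriving buffers without it. A minimal usage sketch, assuming some driver-side spinlock (the hypothetical vq_lock below) is what serializes virtqueue operations:

/* Sketch only: vq_lock and process_used_buffers() are illustrative
 * driver-side names, not part of this patch. */
unsigned idx;

spin_lock(&vq_lock);
idx = virtqueue_enable_cb_prepare(vq);	/* serialized: re-enable callbacks
					 * and snapshot last_used_idx */
spin_unlock(&vq_lock);

/* Later, from any context, with no lock held: the kernel-doc above
 * states that virtqueue_poll() needs no serialization. */
if (virtqueue_poll(vq, idx)) {
	/* Used buffers arrived after the snapshot; go back to the
	 * normal serialized processing path, e.g.: */
	spin_lock(&vq_lock);
	virtqueue_disable_cb(vq);
	process_used_buffers(vq);	/* hypothetical helper */
	spin_unlock(&vq_lock);
}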


include/linux/virtio.h: +4 −0
@@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
 
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool virtqueue_poll(struct virtqueue *vq, unsigned);
+
 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
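
One detail worth noting about the opaque value: it is just the snapshotted free-running 16-bit last_used_idx widened to unsigned, and virtqueue_poll() truncates it back with a (u16) cast so the comparison wraps exactly as the ring indices do. A standalone userspace demo of that comparison (plain C for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned snapshot = 0xffff;	/* as returned by the prepare step */
	uint16_t used_idx = 0xffff;	/* device-visible used index */

	/* No buffers consumed since the snapshot: indices match. */
	printf("pending=%d\n", (uint16_t)snapshot != used_idx);	/* prints 0 */

	/* Device consumes one buffer; the 16-bit index wraps to 0.
	 * The cast keeps the inequality meaningful across the wrap. */
	used_idx++;
	printf("pending=%d\n", (uint16_t)snapshot != used_idx);	/* prints 1 */
	return 0;
}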