Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 18445c4d authored by Rusty Russell
Browse files

virtio: explicit enable_cb/disable_cb rather than callback return.



It seems that virtio_net wants to disable callbacks (interrupts) before
calling netif_rx_schedule(), so we can't use the return value to do so.

Rename "restart" to "cb_enable" and introduce "cb_disable" hook: callback
now returns void, rather than a boolean.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent a586d4f6
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@ struct virtblk_req
	struct virtio_blk_inhdr in_hdr;
};

static bool blk_done(struct virtqueue *vq)
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
@@ -65,7 +65,6 @@ static bool blk_done(struct virtqueue *vq)
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
	return true;
}

static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+1 −1
Original line number Diff line number Diff line
@@ -191,7 +191,7 @@ static void lg_notify(struct virtqueue *vq)
 * So we provide devices with a "find virtqueue and set it up" function. */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
				    unsigned index,
				    bool (*callback)(struct virtqueue *vq))
				    void (*callback)(struct virtqueue *vq))
{
	struct lguest_device *ldev = to_lgdev(vdev);
	struct lguest_vq_info *lvq;
+8 −7
Original line number Diff line number Diff line
@@ -52,13 +52,12 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}

static bool skb_xmit_done(struct virtqueue *rvq)
static void skb_xmit_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;

	/* In case we were waiting for output buffers. */
	netif_wake_queue(vi->dev);
	return true;
}

static void receive_skb(struct net_device *dev, struct sk_buff *skb,
@@ -161,12 +160,14 @@ static void try_fill_recv(struct virtnet_info *vi)
	vi->rvq->vq_ops->kick(vi->rvq);
}

static bool skb_recv_done(struct virtqueue *rvq)
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	netif_rx_schedule(vi->dev, &vi->napi);
	/* Suppress further interrupts. */
	return false;
	/* Schedule NAPI, Suppress further interrupts if successful. */
	if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__netif_rx_schedule(vi->dev, &vi->napi);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -192,7 +193,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->restart(vi->rvq))
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && netif_rx_reschedule(vi->dev, napi))
			goto again;
	}
+16 −5
Original line number Diff line number Diff line
@@ -220,7 +220,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
	return ret;
}

static bool vring_restart(struct virtqueue *_vq)
/* Suppress virtqueue callbacks (interrupts): set VRING_AVAIL_F_NO_INTERRUPT
 * in the shared avail ring so the host stops notifying us when buffers are
 * consumed.  Re-enabled later via the enable_cb hook. */
static void vring_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);
	/* Catch unbalanced use: callbacks must not already be disabled. */
	BUG_ON(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	END_USE(vq);
}

static bool vring_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

@@ -254,8 +264,8 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback && !vq->vq.callback(&vq->vq))
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
@@ -264,7 +274,8 @@ static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.restart = vring_restart,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
	.shutdown = vring_shutdown,
};

@@ -272,7 +283,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      bool (*callback)(struct virtqueue *))
				      void (*callback)(struct virtqueue *))
{
	struct vring_virtqueue *vq;
	unsigned int i;
+6 −5
Original line number Diff line number Diff line
@@ -11,15 +11,13 @@
/**
 * virtqueue - a queue to register buffers for sending or receiving.
 * @callback: the function to call when buffers are consumed (can be NULL).
 *    If this returns false, callbacks are suppressed until vq_ops->restart
 *    is called.
 * @vdev: the virtio device this queue was created for.
 * @vq_ops: the operations for this virtqueue (see below).
 * @priv: a pointer for the virtqueue implementation to use.
 */
struct virtqueue
{
	bool (*callback)(struct virtqueue *vq);
	void (*callback)(struct virtqueue *vq);
	struct virtio_device *vdev;
	struct virtqueue_ops *vq_ops;
	void *priv;
@@ -41,7 +39,9 @@ struct virtqueue
 *	vq: the struct virtqueue we're talking about.
 *	len: the length written into the buffer
 *	Returns NULL or the "data" token handed to add_buf.
 * @restart: restart callbacks after callback returned false.
 * @disable_cb: disable callbacks
 *	vq: the struct virtqueue we're talking about.
 * @enable_cb: restart callbacks after disable_cb.
 *	vq: the struct virtqueue we're talking about.
 *	This returns "false" (and doesn't re-enable) if there are pending
 *	buffers in the queue, to avoid a race.
@@ -65,7 +65,8 @@ struct virtqueue_ops {

	void *(*get_buf)(struct virtqueue *vq, unsigned int *len);

	bool (*restart)(struct virtqueue *vq);
	void (*disable_cb)(struct virtqueue *vq);
	bool (*enable_cb)(struct virtqueue *vq);

	void (*shutdown)(struct virtqueue *vq);
};
Loading