Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cac320c8 authored by Jesper Dangaard Brouer, committed by David S. Miller
Browse files

virtio_net: convert to use generic xdp_frame and xdp_return_frame API



The virtio_net driver assumes XDP frames are always released based on
page refcnt (via put_page).  Thus, it only queues the XDP data pointer
address and uses virt_to_head_page() to retrieve struct page.

Use the XDP return API to get away from such assumptions. Instead
queue an xdp_frame, which allows us to use the xdp_return_frame API
when releasing the frame.

V8: Avoid endianness issues (found by kbuild test robot)
V9: Change __virtnet_xdp_xmit from bool to int return value (found by Dan Carpenter)

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1ffcbc85
Loading
Loading
Loading
Loading
+29 −25
Original line number Diff line number Diff line
@@ -415,38 +415,48 @@ static void virtnet_xdp_flush(struct net_device *dev)
	virtqueue_kick(sq->vq);
}

static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
static int __virtnet_xdp_xmit(struct virtnet_info *vi,
			      struct xdp_buff *xdp)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int len;
	struct xdp_frame *xdpf, *xdpf_sent;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;
	void *xdp_sent;
	int err;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		struct page *sent_page = virt_to_head_page(xdp_sent);
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent->data, &xdpf_sent->mem);

		put_page(sent_page);
	}
	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* virtqueue want to use data area in-front of packet */
	if (unlikely(xdpf->metasize > 0))
		return -EOPNOTSUPP;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	xdp->data -= vi->hdr_len;
	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdp->data;
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len   += vi->hdr_len;

	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
	if (unlikely(err))
		return false; /* Caller handle free/refcnt */
		return -ENOSPC; /* Caller handle free/refcnt */

	return true;
	return 0;
}

static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
@@ -454,7 +464,6 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	bool sent;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicate XDP resources have been successfully allocated.
@@ -463,10 +472,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
	if (!xdp_prog)
		return -ENXIO;

	sent = __virtnet_xdp_xmit(vi, xdp);
	if (!sent)
		return -ENOSPC;
	return 0;
	return __virtnet_xdp_xmit(vi, xdp);
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -555,7 +561,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	bool sent;
	int err;

	len -= vi->hdr_len;
@@ -606,8 +611,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
			delta = orig_data - xdp.data;
			break;
		case XDP_TX:
			sent = __virtnet_xdp_xmit(vi, &xdp);
			if (unlikely(!sent)) {
			err = __virtnet_xdp_xmit(vi, &xdp);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
@@ -690,7 +695,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	bool sent;
	int err;

	head_skb = NULL;
@@ -762,8 +766,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
			}
			break;
		case XDP_TX:
			sent = __virtnet_xdp_xmit(vi, &xdp);
			if (unlikely(!sent)) {
			err = __virtnet_xdp_xmit(vi, &xdp);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);