Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 389ab7f0 authored by Jesper Dangaard Brouer, committed by Alexei Starovoitov
Browse files

xdp: introduce xdp_return_frame_rx_napi



When sending an xdp_frame through xdp_do_redirect call, then error
cases can happen where the xdp_frame needs to be dropped, and
returning an -errno code isn't sufficient/possible any longer
(e.g. for cpumap case). This is already fully supported, by simply
calling xdp_return_frame.

This patch is an optimization, which provides xdp_return_frame_rx_napi,
which is a faster variant for these error cases.  It takes advantage of
the protection provided by XDP RX running under NAPI protection.

This change is mostly relevant for drivers using the page_pool
allocator as it can take advantage of this. (Tested with mlx5).

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 9940fbf6
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -115,13 +115,14 @@ void page_pool_destroy(struct page_pool *pool);
void __page_pool_put_page(struct page_pool *pool,
			  struct page *page, bool allow_direct);

static inline void page_pool_put_page(struct page_pool *pool, struct page *page)
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	__page_pool_put_page(pool, page, false);
	__page_pool_put_page(pool, page, allow_direct);
#endif
}
/* Very limited use-cases allow recycle direct */
+1 −0
Original line number Diff line number Diff line
@@ -104,6 +104,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
}

void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);

int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+1 −1
Original line number Diff line number Diff line
@@ -578,7 +578,7 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame(xdpf);
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
+1 −1
Original line number Diff line number Diff line
@@ -239,7 +239,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
		if (err) {
			drops++;
			xdp_return_frame(xdpf);
			xdp_return_frame_rx_napi(xdpf);
		} else {
			sent++;
		}
+16 −4
Original line number Diff line number Diff line
@@ -308,7 +308,13 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

static void xdp_return(void *data, struct xdp_mem_info *mem)
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites.  Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
{
	struct xdp_mem_allocator *xa;
	struct page *page;
@@ -320,7 +326,7 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (xa)
			page_pool_put_page(xa->page_pool, page);
			page_pool_put_page(xa->page_pool, page, napi_direct);
		else
			put_page(page);
		rcu_read_unlock();
@@ -340,12 +346,18 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)

void xdp_return_frame(struct xdp_frame *xdpf)
{
	xdp_return(xdpf->data, &xdpf->mem);
	__xdp_return(xdpf->data, &xdpf->mem, false);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

void xdp_return_buff(struct xdp_buff *xdp)
{
	xdp_return(xdp->data, &xdp->rxq->mem);
	__xdp_return(xdp->data, &xdp->rxq->mem, true);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);