
Commit 5168d732 authored by Jesper Dangaard Brouer, committed by David S. Miller

mlx5: basic XDP_REDIRECT forward support

This implements basic XDP redirect support in the mlx5 driver.

Notice that ndo_xdp_xmit() is NOT implemented, because that API
needs some changes that this patchset is working towards.

The main purpose of this patch is to have different drivers doing
XDP_REDIRECT, to show how different memory models behave in a
cross-driver world.

Update(pre-RFCv2 Tariq): Need to DMA unmap the page before xdp_do_redirect,
as the return API does not exist yet to keep this mapped.

Update(pre-RFCv3 Saeed): Don't mix XDP_TX and XDP_REDIRECT flushing;
introduce the xdpsq.db.redirect_flush boolean.

V9: Adjust for commit 121e8927 ("net/mlx5e: Refactor RQ XDP_TX indication")

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 897ddc24
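
For context on the verdict being wired up here: XDP_REDIRECT is returned by an
attached BPF program, typically via the bpf_redirect_map() helper backed by a
DEVMAP. The program below is a minimal illustrative sketch and is not part of
this commit; the map and function names (tx_port, xdp_redirect_prog) and the
BTF-style map definition are assumptions for the example, using the modern
libbpf conventions rather than the ones current at the time of this patch.

/* xdp_redirect_map.bpf.c - illustrative sketch, not part of this commit.
 * Redirects every received frame to the ifindex stored at key 0 of a
 * DEVMAP. Compile with clang -O2 -target bpf.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);	/* target ifindex */
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	/* On success this returns XDP_REDIRECT, which lands the frame in
	 * the driver's xdp_do_redirect() path that this patch adds. */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";

With a DEVMAP target, the redirect core can queue frames and transmit them in
batches, which is what the redirect_flush flag introduced below exists to drive.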
+1 −0
@@ -392,6 +392,7 @@ struct mlx5e_xdpsq {
 	struct {
 		struct mlx5e_dma_info     *di;
 		bool                       doorbell;
+		bool                       redirect_flush;
 	} db;
 
 	/* read only */
+24 −3
@@ -236,14 +236,20 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 	return 0;
 }
 
+static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
+				 struct mlx5e_dma_info *dma_info)
+{
+	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+		       rq->buff.map_dir);
+}
+
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 			bool recycle)
 {
 	if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
 		return;
 
-	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
-		       rq->buff.map_dir);
+	mlx5e_page_dma_unmap(rq, dma_info);
 	put_page(dma_info->page);
 }
 
@@ -800,9 +806,10 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 				   struct mlx5e_dma_info *di,
 				   void *va, u16 *rx_headroom, u32 *len)
 {
-	const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
 	struct xdp_buff xdp;
 	u32 act;
+	int err;
 
 	if (!prog)
 		return false;
@@ -823,6 +830,15 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
 			trace_xdp_exception(rq->netdev, prog, act);
 		return true;
+	case XDP_REDIRECT:
+		/* When XDP enabled then page-refcnt==1 here */
+		err = xdp_do_redirect(rq->netdev, &xdp, prog);
+		if (!err) {
+			__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+			rq->xdpsq.db.redirect_flush = true;
+			mlx5e_page_dma_unmap(rq, di);
+		}
+		return true;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
@@ -1140,6 +1156,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		xdpsq->db.doorbell = false;
 	}
 
+	if (xdpsq->db.redirect_flush) {
+		xdp_do_flush_map();
+		xdpsq->db.redirect_flush = false;
+	}
+
 	mlx5_cqwq_update_db_record(&cq->wq);
 
 	/* ensure cq space is freed before enabling more cqes */
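
The redirect_flush handling above is why xdp_do_flush_map() runs once per
mlx5e_poll_rx_cq() invocation rather than once per packet: frames redirected
into the DEVMAP during a NAPI poll are pushed out in a single batch at the end.
To drive this path end to end, userspace loads the program, fills the DEVMAP,
and attaches to the ingress device. The loader below is a hedged sketch: it
assumes libbpf 1.x APIs and the illustrative object/map/program names from the
earlier example, and is not part of this commit.

/* xdp_redirect_load.c - hedged userspace sketch, not part of this commit.
 * Usage: ./xdp_redirect_load <in-ifname> <out-ifname>
 * Link with -lbpf.
 */
#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(int argc, char **argv)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int map_fd, in_ifindex, out_ifindex;
	__u32 key = 0;

	if (argc != 3)
		return 1;
	in_ifindex  = if_nametoindex(argv[1]);
	out_ifindex = if_nametoindex(argv[2]);

	obj = bpf_object__open_file("xdp_redirect_map.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	/* Tell the program where to redirect: key 0 -> egress ifindex. */
	map_fd = bpf_object__find_map_fd_by_name(obj, "tx_port");
	if (bpf_map_update_elem(map_fd, &key, &out_ifindex, BPF_ANY))
		return 1;

	/* Attach on the ingress device; received frames now take the
	 * driver's XDP_REDIRECT branch added in this patch. */
	prog = bpf_object__find_program_by_name(obj, "xdp_redirect_prog");
	if (bpf_xdp_attach(in_ifindex, bpf_program__fd(prog), 0, NULL) < 0)
		return 1;

	printf("redirecting %s -> %s\n", argv[1], argv[2]);
	pause();	/* keep the program attached until interrupted */
	bpf_xdp_detach(in_ifindex, 0, NULL);
	return 0;
}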