
Commit 7db4c0dd authored by David S. Miller

Merge branch 'qed-XDP-header-adjust'



Yuval Mintz says:

====================
qede: support XDP head adjustments

Daniel has brought to my attention that qede is the only driver
that currently supports XDP but still fails any program where
xdp_adjust_head is set on the bpf_prog. This series is meant to remedy
this and align qede with the rest of the drivers, making it possible to
remove said field.

Patch #1 contains a minor cache-saving optimization for the later patches.

Patches #2 & #3 address existing issues with the qede implementation
[#2 should have been a part of this as it addresses something that's
affected by the additional headroom; #3 is simply here for the ride].

Patches #4 & #5 add the necessary logic in driver for ingress headroom,
the first adding the infrastrucutre needed for supporting the headroon
[as currently qede doesn't support such], and the second removing the
existing XDP limitation.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1ca22126 059eeb07
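
For context on what the series enables: bpf_xdp_adjust_head() lets an XDP
program move the start-of-packet pointer into headroom the driver reserves
in front of each frame, which is exactly what qede lacked. Below is a
minimal sketch of such a program (illustrative only and not part of this
series; the program name, the 4-byte tag value, and the libbpf-style
includes are assumptions):

/* Pushes a hypothetical 4-byte tag in front of every frame; the head
 * adjustment only succeeds when the driver reserves headroom
 * (XDP_PACKET_HEADROOM) in front of each packet.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_push_tag(struct xdp_md *ctx)
{
	void *data, *data_end;

	/* A negative delta grows the packet at the head; the helper
	 * returns nonzero if no headroom is available.
	 */
	if (bpf_xdp_adjust_head(ctx, -4))
		return XDP_ABORTED;

	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;

	/* The verifier requires an explicit bounds check before writing */
	if (data + 4 > data_end)
		return XDP_ABORTED;

	__builtin_memcpy(data, "\x01\x02\x03\x04", 4);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Such an object is typically attached with "ip link set dev <ifname> xdp obj
prog.o sec xdp"; before this series, qede's qede_xdp_set() rejected any
program with xdp_adjust_head set, returning -EOPNOTSUPP (see the second
file below).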
+15 −7
@@ -313,21 +313,24 @@ struct qede_rx_queue {
 	u8 data_direction;
 	u8 rxq_id;
 
+	/* Used once per each NAPI run */
+	u16 num_rx_buffers;
+
+	u16 rx_headroom;
+
 	u32 rx_buf_size;
 	u32 rx_buf_seg_size;
 
-	u64 rcv_pkts;
-
 	struct sw_rx_data *sw_rx_ring;
 	struct qed_chain rx_bd_ring;
 	struct qed_chain rx_comp_ring ____cacheline_aligned;
 
-	/* Used once per each NAPI run */
-	u16 num_rx_buffers;
-
 	/* GRO */
 	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
 
+	/* Used once per each NAPI run */
+	u64 rcv_pkts;
+
 	u64 rx_hw_errors;
 	u64 rx_alloc_errors;
 	u64 rx_ip_frags;
@@ -349,6 +352,11 @@ struct sw_tx_bd {
 #define QEDE_TSO_SPLIT_BD		BIT(0)
 };
 
+struct sw_tx_xdp {
+	struct page *page;
+	dma_addr_t mapping;
+};
+
 struct qede_tx_queue {
 	u8 is_xdp;
 	bool is_legacy;
@@ -372,11 +380,11 @@ struct qede_tx_queue {
 #define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
 
 	/* Regular Tx requires skb + metadata for release purpose,
-	 * while XDP requires only the pages themselves.
+	 * while XDP requires the pages and the mapped address.
 	 */
 	union {
 		struct sw_tx_bd *skbs;
-		struct page **pages;
+		struct sw_tx_xdp *xdp;
 	} sw_tx_ring;
 
 	struct qed_chain tx_pbl;
+5 −5
@@ -520,11 +520,6 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
 {
 	struct qede_reload_args args;
 
-	if (prog && prog->xdp_adjust_head) {
-		DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
-		return -EOPNOTSUPP;
-	}
-
 	/* If we're called, there was already a bpf reference increment */
 	args.func = &qede_xdp_reload_func;
 	args.u.new_prog = prog;
@@ -537,6 +532,11 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 
+	if (IS_VF(edev)) {
+		DP_NOTICE(edev, "VFs don't support XDP\n");
+		return -EOPNOTSUPP;
+	}
+
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return qede_xdp_set(edev, xdp->prog);
+40 −30
@@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
 	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
 	WARN_ON(!rx_bd);
 	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+				     rxq->rx_headroom);
 
 	rxq->sw_rx_prod++;
 	rxq->filled_buffers++;
@@ -360,7 +361,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 				   metadata->mapping + padding,
 				   length, PCI_DMA_TODEVICE);
 
-	txq->sw_tx_ring.pages[idx] = metadata->data;
+	txq->sw_tx_ring.xdp[idx].page = metadata->data;
+	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
 	txq->sw_tx_prod++;
 
 	/* Mark the fastpath for future XDP doorbell */
@@ -384,19 +386,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq)

 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-	struct eth_tx_1st_bd *bd;
-	u16 hw_bd_cons;
+	u16 hw_bd_cons, idx;
 
 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
 	barrier();
 
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-		bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+		qed_chain_consume(&txq->tx_pbl);
+		idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
 
-		dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
-				 PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
-						  NUM_TX_BDS_MAX]);
+		dma_unmap_page(&edev->pdev->dev,
+			       txq->sw_tx_ring.xdp[idx].mapping,
+			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		__free_page(txq->sw_tx_ring.xdp[idx].page);
 
 		txq->sw_tx_cons++;
 		txq->xmit_pkts++;
 	}
@@ -508,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq,
 	new_mapping = curr_prod->mapping + curr_prod->page_offset;
 
 	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
-	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+					  rxq->rx_headroom);
 
 	rxq->sw_rx_prod++;
 	curr_cons->data = NULL;
@@ -624,7 +627,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
 	napi_gro_receive(&fp->napi, skb);
-	rxq->rcv_pkts++;
 }
 
 static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,7 +886,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static void qede_tpa_end(struct qede_dev *edev,
static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
@@ -934,11 +936,12 @@ static void qede_tpa_end(struct qede_dev *edev,

 	tpa_info->state = QEDE_AGG_STATE_NONE;
 
-	return;
+	return 1;
 err:
 	tpa_info->state = QEDE_AGG_STATE_NONE;
 	dev_kfree_skb_any(tpa_info->skb);
 	tpa_info->skb = NULL;
+	return 0;
 }
 
 static u8 qede_check_notunn_csum(u16 flag)
@@ -990,14 +993,15 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 			struct qede_rx_queue *rxq,
 			struct bpf_prog *prog,
 			struct sw_rx_data *bd,
-			struct eth_fast_path_rx_reg_cqe *cqe)
+			struct eth_fast_path_rx_reg_cqe *cqe,
+			u16 *data_offset, u16 *len)
 {
-	u16 len = le16_to_cpu(cqe->len_on_first_bd);
 	struct xdp_buff xdp;
 	enum xdp_action act;
 
-	xdp.data = page_address(bd->data) + cqe->placement_offset;
-	xdp.data_end = xdp.data + len;
+	xdp.data_hard_start = page_address(bd->data);
+	xdp.data = xdp.data_hard_start + *data_offset;
+	xdp.data_end = xdp.data + *len;
 
 	/* Queues always have a full reset currently, so for the time
 	 * being until there's atomic program replace just mark read
@@ -1007,6 +1011,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	act = bpf_prog_run_xdp(prog, &xdp);
 	rcu_read_unlock();
 
+	/* Recalculate, as XDP might have changed the headers */
+	*data_offset = xdp.data - xdp.data_hard_start;
+	*len = xdp.data_end - xdp.data;
+
 	if (act == XDP_PASS)
 		return true;

@@ -1025,7 +1033,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 		/* Now if there's a transmission problem, we'd still have to
 		 * throw current buffer, as replacement was already allocated.
 		 */
-		if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+		if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
 			dma_unmap_page(rxq->dev, bd->mapping,
 				       PAGE_SIZE, DMA_BIDIRECTIONAL);
 			__free_page(bd->data);
@@ -1052,7 +1060,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
 					    struct sw_rx_data *bd, u16 len,
 					    u16 pad)
 {
-	unsigned int offset = bd->page_offset;
+	unsigned int offset = bd->page_offset + pad;
 	struct skb_frag_struct *frag;
 	struct page *page = bd->data;
 	unsigned int pull_len;
@@ -1069,7 +1077,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
 	 */
 	if (len + pad <= edev->rx_copybreak) {
 		memcpy(skb_put(skb, len),
-		       page_address(page) + pad + offset, len);
+		       page_address(page) + offset, len);
 		qede_reuse_page(rxq, bd);
 		goto out;
 	}
@@ -1077,7 +1085,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
	frag = &skb_shinfo(skb)->frags[0];

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			page, pad + offset, len, rxq->rx_buf_seg_size);
			page, offset, len, rxq->rx_buf_seg_size);

	va = skb_frag_address(frag);
	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
@@ -1178,8 +1186,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
 		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
 		return 0;
 	case ETH_RX_CQE_TYPE_TPA_END:
-		qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
-		return 1;
+		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
 	default:
 		return 0;
 	}
@@ -1224,12 +1231,13 @@ static int qede_rx_process_cqe(struct qede_dev *edev,

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset;
	pad = fp_cqe->placement_offset + rxq->rx_headroom;

	/* Run eBPF program if one is attached */
	if (xdp_prog)
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
			return 1;
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
				 &pad, &len))
			return 0;

	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1298,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_rx_queue *rxq = fp->rxq;
 	struct qede_dev *edev = fp->edev;
+	int work_done = 0, rcv_pkts = 0;
 	u16 hw_comp_cons, sw_comp_cons;
-	int work_done = 0;
 
 	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
 	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1313,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)

 	/* Loop to complete all indicated BDs */
 	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
-		qede_rx_process_cqe(edev, fp, rxq);
+		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
 		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
 		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
 		work_done++;
 	}
 
+	rxq->rcv_pkts += rcv_pkts;
+
 	/* Allocate replacement buffers */
 	while (rxq->num_rx_buffers - rxq->filled_buffers)
 		if (qede_alloc_rx_buffer(rxq, false))
+8 −6
@@ -1187,9 +1187,11 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
 
-	if (rxq->rx_buf_size > PAGE_SIZE)
-		rxq->rx_buf_size = PAGE_SIZE;
+	/* Make sure that the headroom and payload fit in a single page */
+	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
 
 	/* Segment size to spilt a page in multiple equal parts,
 	 * unless XDP is used in which case we'd use the entire page.
@@ -1251,7 +1253,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
 	/* Free the parallel SW ring */
 	if (txq->is_xdp)
-		kfree(txq->sw_tx_ring.pages);
+		kfree(txq->sw_tx_ring.xdp);
 	else
 		kfree(txq->sw_tx_ring.skbs);
 
@@ -1269,9 +1271,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)

 	/* Allocate the parallel driver ring for Tx buffers */
 	if (txq->is_xdp) {
-		size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
-		txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
-		if (!txq->sw_tx_ring.pages)
+		size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+		if (!txq->sw_tx_ring.xdp)
 			goto err;
 	} else {
 		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
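
To make the Rx sizing rule in the qede_alloc_mem_rxq() hunk above concrete,
here is a standalone userspace sketch of the same arithmetic (assuming 4K
pages and the kernel's 256-byte XDP_PACKET_HEADROOM; function and macro
names here are illustrative, not driver code):

#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096u
#define SKETCH_XDP_HEADROOM	256u	/* XDP_PACKET_HEADROOM */

/* Mirrors the new clamp: headroom + payload must fit in one page. */
static unsigned int sketch_rx_buf_size(unsigned int buf_size, int xdp_attached)
{
	unsigned int headroom = xdp_attached ? SKETCH_XDP_HEADROOM : 0;

	if (buf_size + headroom > SKETCH_PAGE_SIZE)
		buf_size = SKETCH_PAGE_SIZE - headroom;
	return buf_size;
}

int main(void)
{
	/* A jumbo-frame sized buffer (9018 is illustrative) is clamped
	 * to 4096 - 256 = 3840 bytes with XDP attached, 4096 without.
	 */
	printf("xdp: %u, no-xdp: %u\n",
	       sketch_rx_buf_size(9018, 1), sketch_rx_buf_size(9018, 0));
	return 0;
}

This clamping is also why, per the context comment in that hunk, the driver
stops splitting a page into multiple equal buffer segments once XDP is in
use and dedicates the entire page to a single frame plus its headroom.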