Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 13df4c66 authored by Jakub Kicinski, committed by David S. Miller
Browse files

nfp: reorganize nfp_net_rx() to get packet offsets early



Calculate packet offsets early in nfp_net_rx() so that we will be
able to use them in upcoming XDP handler.  While at it move relevant
variables into the loop scope.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 164d1e9e
Loading
Loading
Loading
Loading
+30 −26
Original line number Diff line number Diff line
@@ -1383,16 +1383,17 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	unsigned int data_len, meta_len;
	struct nfp_net_rx_buf *rxbuf;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	struct sk_buff *skb;
	int pkts_polled = 0;
	void *new_frag;
	int idx;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		dma_addr_t new_dma_addr;
		void *new_frag;

		idx = rx_ring->rd_p & (rx_ring->cnt - 1);

		rxd = &rx_ring->rxds[idx];
@@ -1408,22 +1409,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
		pkts_polled++;

		rxbuf =	&rx_ring->rxbufs[idx];
		skb = build_skb(rxbuf->frag, nn->fl_bufsz);
		if (unlikely(!skb)) {
			nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_net_napi_alloc_one(nn, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[idx].dma_addr,
				     nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);

		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
@@ -1438,20 +1423,39 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			skb_reserve(skb, NFP_NET_RX_BUF_HEADROOM + meta_len);
			pkt_off = meta_len;
		else
			skb_reserve(skb,
				    NFP_NET_RX_BUF_HEADROOM + nn->rx_offset);
		skb_put(skb, data_len - meta_len);
			pkt_off = nn->rx_offset;
		data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += skb->len;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		skb = build_skb(rxbuf->frag, nn->fl_bufsz);
		if (unlikely(!skb)) {
			nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_net_napi_alloc_one(nn, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[idx].dma_addr,
				     nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, data_off);
		skb_put(skb, pkt_len);

		if (nn->fw_ver.major <= 3) {
			nfp_net_set_hash_desc(nn->netdev, skb, rxd);
		} else if (meta_len) {