
Commit b0e92279 authored by David S. Miller

Merge branch 'thunderx-xdp'



Sunil Goutham says:

====================
net: thunderx: Adds XDP support

This patch series adds XDP support to the ThunderX NIC driver,
which is used on the CN88xx, CN81xx and CN83xx platforms.

Patches 1-4 are performance-improvement and cleanup patches,
done with XDP's performance bottlenecks in view.
The rest of the patches add the actual XDP support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ee0d8d84 77322538
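
For context (not part of this merge): the simplest consumer of the new .ndo_xdp hook is a small eBPF program returning one of the XDP verdicts the driver now handles (XDP_PASS, XDP_TX, XDP_ABORTED, XDP_DROP). The program below is a hedged, illustrative example -- its name and drop policy are arbitrary -- that drops IPv6 frames and passes everything else:

/* Hedged example, not part of this commit: a minimal XDP program of
 * the kind the new .ndo_xdp hook lets userspace attach to the nicvf
 * netdev. Build with: clang -O2 -target bpf -c xdp_drop_ipv6.c -o xdp_drop_ipv6.o
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_drop_ipv6(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* The verifier rejects any packet access without this bounds check */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	if (eth->h_proto == __constant_htons(ETH_P_IPV6))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

With a sufficiently recent iproute2, "ip link set dev <iface> xdp obj xdp_drop_ipv6.o" attaches it, exercising the XDP_SETUP_PROG path in nicvf_xdp() below.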
drivers/net/ethernet/cavium/thunder/nic.h  +8 −2
@@ -252,12 +252,14 @@ struct nicvf_drv_stats {
	u64 tx_csum_overflow;

	/* driver debug stats */
	u64 rcv_buffer_alloc_failures;
	u64 tx_tso;
	u64 tx_timeout;
	u64 txq_stop;
	u64 txq_wake;

	u64 rcv_buffer_alloc_failures;
	u64 page_alloc;

	struct u64_stats_sync   syncp;
};

@@ -266,9 +268,9 @@ struct nicvf {
	struct net_device	*netdev;
	struct pci_dev		*pdev;
	void __iomem		*reg_base;
	struct bpf_prog         *xdp_prog;
#define	MAX_QUEUES_PER_QSET			8
	struct queue_set	*qs;
	struct nicvf_cq_poll	*napi[8];
	void			*iommu_domain;
	u8			vf_id;
	u8			sqs_id;
@@ -294,6 +296,7 @@ struct nicvf {
	/* Queue count */
	u8			rx_queues;
	u8			tx_queues;
	u8			xdp_tx_queues;
	u8			max_queues;

	u8			node;
@@ -318,6 +321,9 @@ struct nicvf {
	struct nicvf_drv_stats  __percpu *drv_stats;
	struct bgx_stats	bgx_stats;

	/* Napi */
	struct nicvf_cq_poll	*napi[8];

	/* MSI-X  */
	u8			num_vec;
	char			irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  +22 −7
@@ -100,11 +100,12 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(tx_csum_overlap),
	NICVF_DRV_STAT(tx_csum_overflow),

	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_timeout),
	NICVF_DRV_STAT(txq_stop),
	NICVF_DRV_STAT(txq_wake),
	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
	NICVF_DRV_STAT(page_alloc),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -720,7 +721,7 @@ static int nicvf_set_channels(struct net_device *dev,
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);
	int cqcount;
	u8 cqcount, txq_count;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
@@ -729,10 +730,26 @@ static int nicvf_set_channels(struct net_device *dev,
	if (channel->tx_count > nic->max_queues)
		return -EINVAL;

	if (nic->xdp_prog &&
	    ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
		netdev_err(nic->netdev,
			   "XDP mode, RXQs + TXQs > Max %d\n",
			   nic->max_queues);
		return -EINVAL;
	}

	if (if_up)
		nicvf_stop(dev);

	cqcount = max(channel->rx_count, channel->tx_count);
	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	if (!nic->xdp_prog)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = channel->rx_count;

	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cqcount = max(nic->rx_queues, txq_count);

	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
@@ -741,12 +758,10 @@ static int nicvf_set_channels(struct net_device *dev,
		nic->sqs_count = 0;
	}

	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
	if (err)
		return err;
drivers/net/ethernet/cavium/thunder/nicvf_main.c  +276 −37
@@ -17,6 +17,9 @@
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

#include "nic_reg.h"
#include "nic.h"
@@ -397,8 +400,10 @@ static void nicvf_request_sqs(struct nicvf *nic)

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

	tx_queues = nic->tx_queues + nic->xdp_tx_queues;
	if (tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
@@ -496,12 +501,99 @@ static int nicvf_init_resources(struct nicvf *nic)
	return 0;
}

static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
				struct sk_buff **skb)
{
	struct xdp_buff xdp;
	struct page *page;
	u32 action;
	u16 len, offset = 0;
	u64 dma_addr, cpu_addr;
	void *orig_data;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
	if (!cpu_addr)
		return false;
	cpu_addr = (u64)phys_to_virt(cpu_addr);
	page = virt_to_page((void *)cpu_addr);

	xdp.data_hard_start = page_address(page);
	xdp.data = (void *)cpu_addr;
	xdp.data_end = xdp.data + len;
	orig_data = xdp.data;

	rcu_read_lock();
	action = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	/* Check if XDP program has changed headers */
	if (orig_data != xdp.data) {
		len = xdp.data_end - xdp.data;
		offset = orig_data - xdp.data;
		dma_addr -= offset;
	}

	switch (action) {
	case XDP_PASS:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}

		/* Build SKB and pass on packet to network stack */
		*skb = build_skb(xdp.data,
				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
		if (!*skb)
			put_page(page);
		else
			skb_put(*skb, len);
		return false;
	case XDP_TX:
		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(action);
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
	case XDP_DROP:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
		put_page(page);
		return true;
	}
	return false;
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int cqe_type, int budget,
				  int budget, int *subdesc_cnt,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct page *page;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
@@ -513,12 +605,26 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
	/* Check for errors */
	if (cqe_tx->send_status)
		nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);

	/* Is this a XDP designated Tx queue */
	if (sq->is_xdp) {
		page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
		/* Check if it's recycled page or else unmap DMA mapping */
		if (page && (page_ref_count(page) == 1))
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);

		/* Release page reference for recycling */
		if (page)
			put_page(page);
		sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		return;
	}

	nicvf_check_cqe_tx_errs(nic, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -528,12 +634,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
			nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
			*subdesc_cnt += tso_sqe->subdesc_cnt + 1;
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
@@ -544,7 +650,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
		 * a SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
			*subdesc_cnt += hdr->subdesc_cnt + 1;
	}
}

@@ -578,9 +684,9 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx)
				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
{
	struct sk_buff *skb;
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf *snic = nic;
	int err = 0;
@@ -595,16 +701,25 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
	}

	/* Check for errors */
	if (cqe_rx->err_level || cqe_rx->err_opcode) {
		err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
		if (err && !cqe_rx->rb_cnt)
			return;
	}

	skb = nicvf_get_rcv_skb(snic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
	/* For XDP, ignore pkts spanning multiple pages */
	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
		/* Packet consumed by XDP */
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
			return;
	} else {
		skb = nicvf_get_rcv_skb(snic, cqe_rx,
					nic->xdp_prog ? true : false);
	}

	if (!skb)
		return;

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
@@ -646,13 +761,14 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	int subdesc_cnt = 0;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	struct snd_queue *sq;
	unsigned int tx_pkts = 0, tx_bytes = 0;
	struct snd_queue *sq = &qs->sq[cq_idx];
	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;

	spin_lock_bh(&cq->lock);
loop:
@@ -667,8 +783,6 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -682,17 +796,15 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
			work_done++;
		break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev,
					      (void *)cq_desc, CQE_TYPE_SEND,
					      budget, &tx_pkts, &tx_bytes);
			nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
					      budget, &subdesc_cnt,
					      &tx_pkts, &tx_bytes);
			tx_done++;
		break;
		case CQE_TYPE_INVALID:
@@ -704,9 +816,6 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -716,13 +825,26 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
		goto loop;

done:
	/* Update SQ's descriptor free count */
	if (subdesc_cnt)
		nicvf_put_sq_desc(sq, subdesc_cnt);

	txq_idx = nicvf_netdev_qidx(nic, cq_idx);
	/* Handle XDP TX queues */
	if (nic->pnicvf->xdp_prog) {
		if (txq_idx < nic->pnicvf->xdp_tx_queues) {
			nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
			goto out;
		}
		nic = nic->pnicvf;
		txq_idx -= nic->pnicvf->xdp_tx_queues;
	}

	/* Wakeup TXQ if its stopped earlier due to SQ full */
	sq = &nic->qs->sq[cq_idx];
	if (tx_done ||
	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev,
					  nicvf_netdev_qidx(nic, cq_idx));
		txq = netdev_get_tx_queue(netdev, txq_idx);
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

@@ -735,10 +857,11 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
					    netdev->name, txq_idx);
		}
	}

out:
	spin_unlock_bh(&cq->lock);
	return work_done;
}
@@ -1054,6 +1177,13 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
		return NETDEV_TX_OK;
	}

	/* In XDP case, initial HW tx queues are used for XDP,
	 * but stack's queue mapping starts at '0', so skip the
	 * Tx queues attached to Rx queues for XDP.
	 */
	if (nic->xdp_prog)
		qid += nic->xdp_tx_queues;

	snic = nic;
	/* Get secondary Qset's SQ structure */
	if (qid >= MAX_SND_QUEUES_PER_QS) {
@@ -1531,6 +1661,114 @@ static int nicvf_set_features(struct net_device *netdev,
	return 0;
}

static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
{
	u8 cq_count, txq_count;

	/* Set XDP Tx queue count same as Rx queue count */
	if (!bpf_attached)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = nic->rx_queues;

	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
	 * needs to be allocated, check how many.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cq_count = max(nic->rx_queues, txq_count);
	if (cq_count > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Set primary Qset's resources */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);

	/* Update stack */
	nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
}

static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
{
	struct net_device *dev = nic->netdev;
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;

	/* For now just support only the usual MTU sized frames */
	if (prog && (dev->mtu > 1500)) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    dev->mtu);
		return -EOPNOTSUPP;
	}

	/* ALL SQs attached to CQs i.e same as RQs, are treated as
	 * XDP Tx queues and more Tx queues are allocated for
	 * network stack to send pkts out.
	 *
	 * No of Tx queues are either same as Rx queues or whatever
	 * is left in max no of queues possible.
	 */
	if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
		netdev_warn(dev,
			    "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
			    nic->max_queues);
		return -ENOMEM;
	}

	if (if_up)
		nicvf_stop(nic->netdev);

	old_prog = xchg(&nic->xdp_prog, prog);
	/* Detach old prog, if any */
	if (old_prog)
		bpf_prog_put(old_prog);

	if (nic->xdp_prog) {
		/* Attach BPF program */
		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		if (!IS_ERR(nic->xdp_prog))
			bpf_attached = true;
	}

	/* Calculate Tx queues needed for XDP and network stack */
	nicvf_set_xdp_queues(nic, bpf_attached);

	if (if_up) {
		/* Reinitialize interface, clean slate */
		nicvf_open(nic->netdev);
		netif_trans_update(nic->netdev);
	}

	return 0;
}

static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
{
	struct nicvf *nic = netdev_priv(netdev);

	/* To avoid checks while retrieving buffer address from CQE_RX,
	 * do not support XDP for T88 pass1.x silicons which are anyway
	 * not in use widely.
	 */
	if (pass1_silicon(nic->pdev))
		return -EOPNOTSUPP;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nicvf_xdp_setup(nic, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!nic->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
@@ -1541,6 +1779,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_tx_timeout         = nicvf_tx_timeout,
	.ndo_fix_features       = nicvf_fix_features,
	.ndo_set_features       = nicvf_set_features,
	.ndo_xdp		= nicvf_xdp,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
drivers/net/ethernet/cavium/thunder/nicvf_queues.c  +319 −68 (diff too large for this view; changes collapsed)
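
The collapsed nicvf_queues.c diff carries the bulk of the series: XDP-aware buffer allocation, the nicvf_xdp_sq_append_pkt()/nicvf_xdp_sq_doorbell() helpers declared in nicvf_queues.h below, and the page-recycling scheme behind the repeated page_ref_count(page) == 1 checks in nicvf_main.c above. As a hedged sketch only (the real code is not visible in this view), the recycle decision amounts to: a cached page whose refcount has dropped back to 1 is held solely by the driver and can be handed out again instead of being freed and re-allocated. The helper name and signature below are illustrative, not the driver's:

/* Illustrative sketch, not the collapsed nicvf_queues.c code: the
 * page-recycle test implied by the pgcache struct below and the
 * page_ref_count() checks above. A refcount of 1 means no SKB and
 * no in-flight XDP_TX slot still holds the page, so it is safe to
 * reuse for a fresh RX buffer.
 */
static struct page *pgcache_try_recycle(struct pgcache *cache,
					int pgcnt, int *pgidx)
{
	struct page *page = cache[*pgidx].page;

	*pgidx = (*pgidx + 1) % pgcnt;		/* advance ring index */

	if (page && page_ref_count(page) == 1) {
		page_ref_add(page, 1);		/* recycled page holds an extra reference */
		return page;
	}
	return NULL;	/* caller falls back to a fresh page allocation */
}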

drivers/net/ethernet/cavium/thunder/nicvf_queues.h  +31 −1
@@ -10,6 +10,7 @@
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include <linux/iommu.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
@@ -213,6 +214,12 @@ struct q_desc_mem {
	void		*unalign_base;
};

struct pgcache {
	struct page	*page;
	int		ref_count;
	u64		dma_addr;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
@@ -222,6 +229,13 @@ struct rbdr {
	u32		head;
	u32		tail;
	struct q_desc_mem   dmem;
	bool		is_xdp;

	/* For page recycling */
	int		pgidx;
	int		pgcnt;
	int		pgalloc;
	struct pgcache	*pgcache;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
@@ -258,6 +272,10 @@ struct snd_queue {
	u32		tail;
	u64		*skbuff;
	void		*desc;
	u64		*xdp_page;
	u16		xdp_desc_cnt;
	u16		xdp_free_cnt;
	bool		is_xdp;

#define	TSO_HEADER_SIZE	128
	/* For TSO segment's header */
@@ -301,6 +319,14 @@ struct queue_set {

#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
@@ -318,8 +344,12 @@ void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num);
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
			    u64 bufaddr, u64 dma_addr, u16 len);
void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);
