
Commit c487e6b1 authored by Jakub Kicinski, committed by David S. Miller

nfp: store dma direction in data path structure



Instead of testing whether xdp_prog is present, store the DMA direction
in the data path structure.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 892a7f70
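
In outline, the patch stops deriving the DMA direction from dp->xdp_prog at each map/unmap call site and instead caches it once in the data path structure: nfp_net_netdev_init() initializes it to DMA_FROM_DEVICE, nfp_net_xdp_setup() switches it to DMA_BIDIRECTIONAL when an XDP program is attached, and the mapping helpers simply read dp->rx_dma_dir. The snippet below is a minimal, self-contained C sketch of that pattern only, not driver code: the struct and field names mirror the diff, while the enum values, stub helpers, and main() are illustrative stand-ins.

/*
 * Minimal sketch of the pattern in this commit, not the driver code itself:
 * cache the RX mapping direction in the data path structure once, instead of
 * recomputing "xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE" at each call
 * site.  Field names mirror the diff below; the enum values, stub helpers,
 * and main() are illustrative stand-ins.
 */
#include <stdio.h>

enum dma_data_direction {		/* same numeric values as the kernel's enum */
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

struct nfp_net_dp {
	void *xdp_prog;			/* non-NULL while an XDP program is attached */
	int rx_dma_dir;			/* new field: mapping direction for RX buffers */
};

/* Before the patch: every caller passed the direction explicitly. */
static void nfp_net_dma_map_rx_old(struct nfp_net_dp *dp, int direction)
{
	(void)dp;			/* the real helper hands dp->dev to dma_map_single() */
	printf("old helper maps RX buffer with direction %d\n", direction);
}

/* After the patch: the helper reads the direction cached in dp. */
static void nfp_net_dma_map_rx_new(struct nfp_net_dp *dp)
{
	printf("new helper maps RX buffer with direction %d\n", dp->rx_dma_dir);
}

int main(void)
{
	struct nfp_net_dp dp = { .xdp_prog = NULL };

	/* nfp_net_netdev_init(): RX buffers are only written by the device. */
	dp.rx_dma_dir = DMA_FROM_DEVICE;
	nfp_net_dma_map_rx_new(&dp);

	/* nfp_net_xdp_setup(): XDP_TX sends out of RX pages, so map BIDIR. */
	dp.xdp_prog = &dp;		/* stand-in for an attached program */
	dp.rx_dma_dir = dp.xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	nfp_net_dma_map_rx_new(&dp);

	/* Old style shown for contrast: the direction travels as an argument. */
	nfp_net_dma_map_rx_old(&dp, dp.xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
	return 0;
}

The real helpers in the diff pass dp->rx_dma_dir to dma_map_single()/dma_unmap_single(); since a buffer must be unmapped with the same direction it was mapped with, changing the stored direction goes through the RX ring reconfig noted in nfp_net_xdp_setup().
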
+7 −4
@@ -438,6 +438,7 @@ struct nfp_stat_pair {
  * @bpf_offload_skip_sw:  Offloaded BPF program will not be rerun by cls_bpf
  * @bpf_offload_xdp:	Offloaded BPF program is XDP
  * @chained_metadata_format:  Firemware will use new metadata format
+ * @rx_dma_dir:		Mapping direction for RX buffers
  * @ctrl:		Local copy of the control register/word.
  * @fl_bufsz:		Currently configured size of the freelist buffers
  * @rx_offset:		Offset in the RX buffers where packet data starts
@@ -458,10 +459,12 @@ struct nfp_net_dp {
 	struct device *dev;
 	struct net_device *netdev;
 
-	unsigned is_vf:1;
-	unsigned bpf_offload_skip_sw:1;
-	unsigned bpf_offload_xdp:1;
-	unsigned chained_metadata_format:1;
+	u8 is_vf:1;
+	u8 bpf_offload_skip_sw:1;
+	u8 bpf_offload_xdp:1;
+	u8 chained_metadata_format:1;
+
+	u8 rx_dma_dir;
 
 	u32 ctrl;
 	u32 fl_bufsz;
+17 −28
@@ -85,20 +85,18 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
 	put_unaligned_le32(reg, fw_ver);
 }
 
-static dma_addr_t
-nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag, int direction)
+static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
 {
 	return dma_map_single(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
 			      dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
-			      direction);
+			      dp->rx_dma_dir);
 }
 
-static void
-nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
-		     int direction)
+static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
 {
 	dma_unmap_single(dp->dev, dma_addr,
-			 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+			 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+			 dp->rx_dma_dir);
 }
 
 /* Firmware reconfig
@@ -991,8 +989,7 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
 		if (!tx_ring->txbufs[idx].frag)
 			continue;
 
-		nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr,
-				     DMA_BIDIRECTIONAL);
+		nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr);
 		__free_page(virt_to_page(tx_ring->txbufs[idx].frag));
 
 		done_pkts++;
@@ -1037,8 +1034,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 		tx_buf = &tx_ring->txbufs[idx];
 
 		if (tx_ring == r_vec->xdp_ring) {
-			nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr,
-					     DMA_BIDIRECTIONAL);
+			nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr);
 			__free_page(virt_to_page(tx_ring->txbufs[idx].frag));
 		} else {
 			struct sk_buff *skb = tx_ring->txbufs[idx].skb;
@@ -1139,7 +1135,6 @@ static void *
 nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 		     dma_addr_t *dma_addr)
 {
-	int direction;
 	void *frag;
 
 	if (!dp->xdp_prog)
@@ -1151,9 +1146,7 @@ nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 		return NULL;
 	}
 
-	direction = dp->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
-	*dma_addr = nfp_net_dma_map_rx(dp, frag, direction);
+	*dma_addr = nfp_net_dma_map_rx(dp, frag);
 	if (dma_mapping_error(dp->dev, *dma_addr)) {
 		nfp_net_free_frag(frag, dp->xdp_prog);
 		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
@@ -1163,9 +1156,7 @@ nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 	return frag;
 }
 
-static void *
-nfp_net_napi_alloc_one(struct nfp_net_dp *dp, int direction,
-		       dma_addr_t *dma_addr)
+static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
 	void *frag;
 
@@ -1178,7 +1169,7 @@ nfp_net_napi_alloc_one(struct nfp_net_dp *dp, int direction,
 		return NULL;
 	}
 
-	*dma_addr = nfp_net_dma_map_rx(dp, frag, direction);
+	*dma_addr = nfp_net_dma_map_rx(dp, frag);
 	if (dma_mapping_error(dp->dev, *dma_addr)) {
 		nfp_net_free_frag(frag, dp->xdp_prog);
 		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
@@ -1260,7 +1251,6 @@ static void
 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
 			  struct nfp_net_rx_ring *rx_ring)
 {
-	int direction = dp->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	unsigned int i;
 
 	for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1271,8 +1261,7 @@ nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
 		if (!rx_ring->rxbufs[i].frag)
 			continue;
 
-		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr,
-				     direction);
+		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
 		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
 		rx_ring->rxbufs[i].dma_addr = 0;
 		rx_ring->rxbufs[i].frag = NULL;
@@ -1478,7 +1467,7 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 		return false;
 	}
 
-	new_frag = nfp_net_napi_alloc_one(dp, DMA_BIDIRECTIONAL, &new_dma_addr);
+	new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
 	if (unlikely(!new_frag)) {
 		nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
 		return false;
@@ -1544,12 +1533,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 	unsigned int true_bufsz;
 	struct sk_buff *skb;
 	int pkts_polled = 0;
-	int rx_dma_map_dir;
 	int idx;
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(dp->xdp_prog);
-	rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
 	tx_ring = r_vec->xdp_ring;
 
@@ -1639,14 +1626,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 			nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
 			continue;
 		}
-		new_frag = nfp_net_napi_alloc_one(dp, rx_dma_map_dir,
-						  &new_dma_addr);
+		new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
 		if (unlikely(!new_frag)) {
 			nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
 			continue;
 		}
 
-		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr, rx_dma_map_dir);
+		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
 
 		nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
 
@@ -2899,6 +2885,7 @@ static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
 
 	dp->xdp_prog = prog;
 	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
+	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 
 	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
 	err = nfp_net_ring_reconfig(nn, dp);
@@ -3128,6 +3115,8 @@ int nfp_net_netdev_init(struct net_device *netdev)
 
 	nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
 
+	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
 	/* Get some of the read-only fields from the BAR */
 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
 	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);