Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 763f9b41 authored by Daniel Borkmann
Browse files

Merge branch 'bpf-xdp-remove-xdp-flush'



Jesper Dangaard Brouer says:

====================
This patchset removes the net_device operation ndo_xdp_flush() call.
This is a follow-up to merge commit ea9916ea ("Merge branch
'ndo_xdp_xmit-cleanup'").  As after commit c1ece6b2 ("bpf/xdp:
devmap can avoid calling ndo_xdp_flush") no callers of ndo_xdp_flush
are left in bpf-next tree.
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 68565a1a 189454e8
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -11883,7 +11883,6 @@ static const struct net_device_ops i40e_netdev_ops = {
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_bpf		= i40e_xdp,
	.ndo_xdp_xmit		= i40e_xdp_xmit,
	.ndo_xdp_flush		= i40e_xdp_flush,
};

/**
+0 −19
Original line number Diff line number Diff line
@@ -3707,22 +3707,3 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,

	return n - drops;
}

/**
 * i40e_xdp_flush - Implements ndo_xdp_flush
 * @dev: netdev
 *
 * Kicks the tail of this CPU's XDP TX ring so that frames previously
 * queued via ndo_xdp_xmit become visible to the hardware.
 **/
void i40e_xdp_flush(struct net_device *dev)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	unsigned int qid = smp_processor_id();

	/* Nothing to do if the VSI is going down, XDP is not enabled on
	 * it, or this CPU has no dedicated XDP queue pair.
	 */
	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    !i40e_enabled_xdp_vsi(vsi) ||
	    qid >= vsi->num_queue_pairs)
		return;

	i40e_xdp_ring_update_tail(vsi->xdp_rings[qid]);
}
+0 −1
Original line number Diff line number Diff line
@@ -489,7 +489,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
void i40e_xdp_flush(struct net_device *dev);

/**
 * i40e_get_head - Retrieve head from head writeback
+0 −21
Original line number Diff line number Diff line
@@ -10069,26 +10069,6 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
	return n - drops;
}

/**
 * ixgbe_xdp_flush - Implements ndo_xdp_flush
 * @dev: netdev
 *
 * Kicks the tail of this CPU's XDP TX ring so that frames previously
 * queued via ndo_xdp_xmit become visible to the hardware.
 **/
static void ixgbe_xdp_flush(struct net_device *dev)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	/* It's possible the device went down between xdp xmit and flush so
	 * we need to ensure device is still up.
	 */
	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
		return;

	/* No ring when no XDP program is attached to this adapter. */
	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
	if (unlikely(!ring))
		return;

	ixgbe_xdp_ring_update_tail(ring);
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
@@ -10136,7 +10116,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_features_check	= ixgbe_features_check,
	.ndo_bpf		= ixgbe_xdp,
	.ndo_xdp_xmit		= ixgbe_xdp_xmit,
	.ndo_xdp_flush		= ixgbe_xdp_flush,
};

/**
+1 −22
Original line number Diff line number Diff line
@@ -1347,26 +1347,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, 0);
}

/* tun_xdp_flush - Implements ndo_xdp_flush
 * @dev: netdev
 *
 * Flushes the per-CPU tun queue so frames previously queued via
 * ndo_xdp_xmit are delivered. The queue is picked by hashing the
 * current CPU id over the active queue count.
 *
 * NOTE(review): the original text contained a stray
 * "return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);" after the
 * out label — unreachable, referencing an undeclared `frame`, and
 * returning a value from a void function. Removed.
 */
static void tun_xdp_flush(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		goto out;

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	__tun_xdp_flush_tfile(tfile);
out:
	rcu_read_unlock();
}

static const struct net_device_ops tap_netdev_ops = {
@@ -1387,7 +1368,6 @@ static const struct net_device_ops tap_netdev_ops = {
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_xdp_flush		= tun_xdp_flush,
};

static void tun_flow_init(struct tun_struct *tun)
@@ -1706,7 +1686,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
			alloc_frag->offset += buflen;
			if (tun_xdp_tx(tun->dev, &xdp))
				goto err_redirect;
			tun_xdp_flush(tun->dev);
			rcu_read_unlock();
			preempt_enable();
			return NULL;
Loading