Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7d12ce1 authored by David S. Miller
Browse files

Merge branch 'xdp-more-work-on-xdp-tracepoints'

Jesper Dangaard Brouer says:

====================
xdp: more work on xdp tracepoints

More work on streamlining and performance optimizing the tracepoints
for XDP.

I've created a simple xdp_monitor application that uses this
tracepoint, and prints statistics. Available at github:

https://github.com/netoptimizer/prototype-kernel/blob/master/kernel/samples/bpf/xdp_monitor_kern.c
https://github.com/netoptimizer/prototype-kernel/blob/master/kernel/samples/bpf/xdp_monitor_user.c



The improvement over tracepoint with strcpy: 9810372 - 8428762 = +1381610 pps faster
 - (1/9810372 - 1/8428762)*10^9 = -16.7 nanosec
 - 100-(8428762/9810372*100) = strcpy-trace is 14.08% slower
 - 1381610/8428762*100 = removing strcpy made it 16.39% faster

V3: Fix merge conflict with commit e4a8e817 ("bpf: misc xdp redirect cleanups")
V2: Change trace_xdp_redirect() to align with args of trace_xdp_exception()
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents fb3bbbda 315ec399
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -9849,14 +9849,14 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
	int err;

	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
		return -EINVAL;
		return -ENETDOWN;

	/* During program transitions its possible adapter->xdp_prog is assigned
	 * but ring has not been configured yet. In this case simply abort xmit.
	 */
	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
	if (unlikely(!ring))
		return -EINVAL;
		return -ENXIO;

	err = ixgbe_xmit_xdp_ring(adapter, xdp);
	if (err != IXGBE_XDP_TX)
+2 −1
Original line number Diff line number Diff line
@@ -718,7 +718,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb);
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
+18 −18
Original line number Diff line number Diff line
@@ -31,53 +31,53 @@ TRACE_EVENT(xdp_exception,
	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__string(name, dev->name)
		__array(u8, prog_tag, 8)
		__field(u32, act)
		__field(int, ifindex)
	),

	TP_fast_assign(
		BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
		memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
		__assign_str(name, dev->name);
		__entry->act		= act;
		__entry->ifindex	= dev->ifindex;
	),

	TP_printk("prog=%s device=%s action=%s",
	TP_printk("prog=%s action=%s ifindex=%d",
		  __print_hex_str(__entry->prog_tag, 8),
		  __get_str(name),
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);

TRACE_EVENT(xdp_redirect,

	TP_PROTO(const struct net_device *from,
		 const struct net_device *to,
		 const struct bpf_prog *xdp, u32 act, int err),
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act,
		 int to_index, int err),

	TP_ARGS(from, to, xdp, act, err),
	TP_ARGS(dev, xdp, act, to_index, err),

	TP_STRUCT__entry(
		__string(name_from, from->name)
		__string(name_to, to->name)
		__array(u8, prog_tag, 8)
		__field(u32, act)
		__field(int, ifindex)
		__field(int, to_index)
		__field(int, err)
	),

	TP_fast_assign(
		BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
		memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
		__assign_str(name_from, from->name);
		__assign_str(name_to, to->name);
		__entry->act		= act;
		__entry->ifindex	= dev->ifindex;
		__entry->to_index	= to_index;
		__entry->err		= err;
	),

	TP_printk("prog=%s from=%s to=%s action=%s err=%d",
	TP_printk("prog=%s action=%s ifindex=%d to_index=%d err=%d",
		  __print_hex_str(__entry->prog_tag, 8),
		  __get_str(name_from), __get_str(name_to),
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_index,
		  __entry->err)
);
#endif /* _TRACE_XDP_H */
+2 −2
Original line number Diff line number Diff line
@@ -3953,7 +3953,8 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb);
				err = xdp_do_generic_redirect(skb->dev, skb,
							      xdp_prog);
				if (err)
					goto out_redir;
			/* fallthru to submit skb */
@@ -3966,7 +3967,6 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
	}
	return XDP_PASS;
out_redir:
	trace_xdp_exception(skb->dev, xdp_prog, XDP_REDIRECT);
	kfree_skb(skb);
	return XDP_DROP;
}
+25 −22
Original line number Diff line number Diff line
@@ -2476,7 +2476,6 @@ static int __bpf_tx_xdp(struct net_device *dev,
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit) {
		bpf_warn_invalid_xdp_redirect(dev->ifindex);
		return -EOPNOTSUPP;
	}

@@ -2525,7 +2524,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
	if (likely(!err))
		ri->map_to_flush = map;
out:
	trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err);
	trace_xdp_redirect(dev, xdp_prog, XDP_REDIRECT, index, err);
	return err;
}

@@ -2543,39 +2542,48 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!fwd)) {
		bpf_warn_invalid_xdp_redirect(index);
		err = -EINVAL;
		goto out;
	}

	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
out:
	trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err);
	trace_xdp_redirect(dev, xdp_prog, XDP_REDIRECT, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);

int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb)
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned int len;
	u32 index = ri->ifindex;
	struct net_device *fwd;
	unsigned int len;
	int err = 0;

	dev = dev_get_by_index_rcu(dev_net(dev), index);
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		bpf_warn_invalid_xdp_redirect(index);
		return -EINVAL;
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP)))
		return -ENETDOWN;
	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len > len)
		return -E2BIG;
	if (unlikely(!(fwd->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto out;
	}

	skb->dev = dev;
	return 0;
	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (skb->len > len) {
		err = -EMSGSIZE;
		goto out;
	}

	skb->dev = fwd;
out:
	trace_xdp_redirect(dev, xdp_prog, XDP_REDIRECT, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);

@@ -3565,11 +3573,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

/* One-time warning when an XDP redirect targets an ifindex with no usable
 * device (removed by this commit; the tracepoint now reports the error). */
void bpf_warn_invalid_xdp_redirect(u32 ifindex)
{
	WARN_ONCE(1, "Illegal XDP redirect to unsupported device ifindex(%i)\n", ifindex);
}

static bool __is_valid_sock_ops_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct bpf_sock_ops))