
Commit 38edddb8 authored by Jesper Dangaard Brouer, committed by Alexei Starovoitov

xdp: add tracepoint for devmap like cpumap has



Notice how this allows us to get XDP statistics without affecting XDP
performance, as the tracepoint is no longer activated on a per-packet basis.

V5: Spotted by John Fastabend.
 Fix: 'sent' also counted 'drops' in this patch; a later patch corrected
 this, but it was a mistake in this intermediate step.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 5d053f9d
include/linux/bpf.h  +4 −2
@@ -492,7 +492,8 @@ struct xdp_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp);
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+		    struct net_device *dev_rx);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -579,7 +580,8 @@ struct xdp_buff;
struct bpf_dtab_netdev;

static inline
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+		    struct net_device *dev_rx)
{
	return 0;
}
include/trace/events/xdp.h  +39 −0
@@ -229,6 +229,45 @@ TRACE_EVENT(xdp_cpumap_enqueue,
		  __entry->to_cpu)
);

+TRACE_EVENT(xdp_devmap_xmit,
+
+	TP_PROTO(const struct bpf_map *map, u32 map_index,
+		 int sent, int drops,
+		 const struct net_device *from_dev,
+		 const struct net_device *to_dev),
+
+	TP_ARGS(map, map_index, sent, drops, from_dev, to_dev),
+
+	TP_STRUCT__entry(
+		__field(int, map_id)
+		__field(u32, act)
+		__field(u32, map_index)
+		__field(int, drops)
+		__field(int, sent)
+		__field(int, from_ifindex)
+		__field(int, to_ifindex)
+	),
+
+	TP_fast_assign(
+		__entry->map_id		= map->id;
+		__entry->act		= XDP_REDIRECT;
+		__entry->map_index	= map_index;
+		__entry->drops		= drops;
+		__entry->sent		= sent;
+		__entry->from_ifindex	= from_dev->ifindex;
+		__entry->to_ifindex	= to_dev->ifindex;
+	),
+
+	TP_printk("ndo_xdp_xmit"
+		  " map_id=%d map_index=%d action=%s"
+		  " sent=%d drops=%d"
+		  " from_ifindex=%d to_ifindex=%d",
+		  __entry->map_id, __entry->map_index,
+		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+		  __entry->sent, __entry->drops,
+		  __entry->from_ifindex, __entry->to_ifindex)
+);
+
#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>
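
[Editor's note] For reference, this event can be consumed from BPF without reintroducing per-packet overhead. Below is a minimal sketch of such a consumer in the style of the samples/bpf tracepoint programs; it is not part of this patch. The devmap_xmit_ctx struct mirrors the TP_STRUCT__entry above (the first 8 bytes of a tracepoint context are not readable from BPF), and the map name and layout are illustrative assumptions.

#include <linux/types.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), bpf_map_def, helpers (samples/bpf) */

/* Mirrors TP_STRUCT__entry of xdp_devmap_xmit above */
struct devmap_xmit_ctx {
	__u64 __pad;		/* common tracepoint fields, not accessible */
	int map_id;		/* offset:8;  size:4 */
	__u32 act;		/* offset:12; size:4 */
	__u32 map_index;	/* offset:16; size:4 */
	int drops;		/* offset:20; size:4 */
	int sent;		/* offset:24; size:4 */
	int from_ifindex;	/* offset:28; size:4 */
	int to_ifindex;		/* offset:32; size:4 */
};

struct datarec {
	__u64 sent;
	__u64 drops;
};

/* Hypothetical per-CPU counter map, read out by a userspace loader */
struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
	.type        = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size    = sizeof(__u32),
	.value_size  = sizeof(struct datarec),
	.max_entries = 1,
};

SEC("tracepoint/xdp/xdp_devmap_xmit")
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
{
	__u32 key = 0;
	struct datarec *rec;

	rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
	if (!rec)
		return 0;
	/* One update per bulk flush, not one per packet */
	rec->sent  += ctx->sent;
	rec->drops += ctx->drops;
	return 0;
}

char _license[] SEC("license") = "GPL";

The same event is also visible to stock tooling, e.g. perf record -a -e xdp:xdp_devmap_xmit, or via the text interface under /sys/kernel/debug/tracing.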
kernel/bpf/devmap.c  +23 −4
@@ -58,6 +58,7 @@
#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
+	struct net_device *dev_rx;
	unsigned int count;
};

@@ -219,6 +220,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
			 struct xdp_bulk_queue *bq)
{
	struct net_device *dev = obj->dev;
+	int sent = 0, drops = 0;
	int i;

	if (unlikely(!bq->count))
@@ -235,11 +237,18 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		int err;

		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
-		if (err)
+		if (err) {
+			drops++;
			xdp_return_frame(xdpf);
+		} else {
+			sent++;
+		}
	}
	bq->count = 0;

+	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
+			      sent, drops, bq->dev_rx, dev);
+	bq->dev_rx = NULL;
	return 0;
}

@@ -296,18 +305,28 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
-static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
+static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
+		      struct net_device *dev_rx)
+
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq);

+	/* The ingress dev_rx will be the same for all xdp_frame's in
+	 * the bulk_queue, because bq is stored per-CPU and must be
+	 * flushed at the end of the net_device driver's NAPI func.
+	 */
+	if (!bq->dev_rx)
+		bq->dev_rx = dev_rx;
+
	bq->q[bq->count++] = xdpf;
	return 0;
}

-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
@@ -319,7 +338,7 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
	if (unlikely(!xdpf))
		return -EOVERFLOW;

-	return bq_enqueue(dst, xdpf);
+	return bq_enqueue(dst, xdpf, dev_rx);
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
net/core/filter.c  +1 −1
@@ -3067,7 +3067,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
	case BPF_MAP_TYPE_DEVMAP: {
		struct bpf_dtab_netdev *dst = fwd;

-		err = dev_map_enqueue(dst, xdp);
+		err = dev_map_enqueue(dst, xdp, dev_rx);
		if (err)
			return err;
		__dev_map_insert_ctx(map, index);
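
[Editor's note] Given the TP_printk format above, each bulk flush produces a single line in the trace buffer, roughly like this (field values illustrative):

  ndo_xdp_xmit map_id=5 map_index=3 action=REDIRECT sent=16 drops=0 from_ifindex=2 to_ifindex=4

One such record covers up to DEV_MAP_BULK_SIZE (16) forwarded frames, which is how the per-packet tracing cost is avoided.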