Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d2af27a authored by Daniel Borkmann
Browse files

Merge branch 'bpf-flow-dissector-tests'



Stanislav Fomichev says:

====================
This patch series adds support for testing flow dissector BPF programs
by extending already existing BPF_PROG_TEST_RUN. The goal is to have
a packet as an input and `struct bpf_flow_keys' as an output. That way
we can easily test flow dissector programs' behavior. I've also modified
existing test_progs.c test to do a simple flow dissector run as well.

* first patch introduces new __skb_flow_bpf_dissect to simplify
  sharing between __skb_flow_bpf_dissect and BPF_PROG_TEST_RUN
* second patch adds actual BPF_PROG_TEST_RUN support
* third patch adds example usage to the selftests

v3:
* rebased on top of latest bpf-next

v2:
* loop over 'kattr->test.repeat' inside of
  bpf_prog_test_run_flow_dissector, don't reuse
  bpf_test_run/bpf_test_run_one
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents d76198b0 bf0f0fd9
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -404,6 +404,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);

/* an array of programs to be executed under rcu_lock.
 *
+5 −0
Original line number Diff line number Diff line
@@ -1221,6 +1221,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
}
#endif

struct bpf_flow_keys;
bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
			    const struct sk_buff *skb,
			    struct flow_dissector *flow_dissector,
			    struct bpf_flow_keys *flow_keys);
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
+82 −0
Original line number Diff line number Diff line
@@ -240,3 +240,85 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
	kfree(data);
	return ret;
}

/* BPF_PROG_TEST_RUN handler for BPF_PROG_TYPE_FLOW_DISSECTOR programs.
 *
 * Builds a real skb around the user-supplied packet data, attaches a
 * dummy kernel socket so the program sees a valid netns, runs the flow
 * dissector program kattr->test.repeat times, and copies the resulting
 * struct bpf_flow_keys (plus the program's return value and the average
 * per-run duration in ns) back to userspace via bpf_test_finish().
 *
 * Returns 0 on success, -EINVAL for a wrong program type, -ENOMEM on
 * allocation failure, -EINTR if a signal interrupts the repeat loop, or
 * the error from bpf_test_init()/bpf_test_finish().
 */
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	struct bpf_skb_data_end *cb;
	u32 retval, duration;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	/* Copy the packet in with headroom for NET_SKB_PAD + NET_IP_ALIGN
	 * and tailroom for the shared info, so build_skb() can use it.
	 */
	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Dummy socket: gives the skb a netns for the program to query. */
	sk = kzalloc(sizeof(*sk), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	/* Pull the Ethernet header and set skb->protocol as if the packet
	 * had been received on the loopback device.
	 */
	skb->protocol = eth_type_trans(skb,
				       current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	/* Point the control block at our output buffer; the program writes
	 * its results through cb->qdisc_cb.flow_keys.
	 */
	cb = (struct bpf_skb_data_end *)skb->cb;
	cb->qdisc_cb.flow_keys = &flow_keys;

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		preempt_disable();
		rcu_read_lock();
		retval = __skb_flow_bpf_dissect(prog, skb,
						&flow_keys_dissector,
						&flow_keys);
		rcu_read_unlock();
		preempt_enable();

		/* Check for signals every iteration, not only when a
		 * reschedule is due; otherwise a large 'repeat' is
		 * uninterruptible and a partial run would be reported
		 * as success.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			/* Exclude the time we spend sleeping from the
			 * measured duration.
			 */
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

out:
	/* kfree_skb() frees the packet data; the dummy socket is ours. */
	kfree_skb(skb);
	kfree(sk);
	return ret;
}
+1 −0
Original line number Diff line number Diff line
@@ -7711,6 +7711,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
};

/* Runtime ops for BPF_PROG_TYPE_FLOW_DISSECTOR: wire up BPF_PROG_TEST_RUN
 * so flow dissector programs can be exercised from userspace.
 */
const struct bpf_prog_ops flow_dissector_prog_ops = {
	.test_run		= bpf_prog_test_run_flow_dissector,
};

int sk_detach_filter(struct sock *sk)
+54 −38
Original line number Diff line number Diff line
@@ -683,6 +683,46 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
	}
}

/* Run the flow dissector BPF program @prog over @skb and fill in
 * @flow_keys with the result. @flow_keys is zeroed and seeded with the
 * skb's network offset before the run, and its nhoff/thoff are clamped
 * to valid bounds afterwards, so the caller never sees out-of-range
 * offsets even from a buggy program.
 *
 * Returns true iff the program returned BPF_OK.
 *
 * Shared between the regular __skb_flow_dissect() path and
 * BPF_PROG_TEST_RUN (bpf_prog_test_run_flow_dissector).
 */
bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
			    const struct sk_buff *skb,
			    struct flow_dissector *flow_dissector,
			    struct bpf_flow_keys *flow_keys)
{
	struct bpf_skb_data_end cb_saved;
	struct bpf_skb_data_end *cb;
	u32 result;

	/* Note that even though the const qualifier is discarded
	 * throughout the execution of the BPF program, all changes(the
	 * control block) are reverted after the BPF program returns.
	 * Therefore, __skb_flow_dissect does not alter the skb.
	 */

	cb = (struct bpf_skb_data_end *)skb->cb;

	/* Save Control Block */
	memcpy(&cb_saved, cb, sizeof(cb_saved));
	memset(cb, 0, sizeof(*cb));

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	cb->qdisc_cb.flow_keys = flow_keys;
	/* Start dissection at the network header; thoff grows from there. */
	flow_keys->nhoff = skb_network_offset(skb);
	flow_keys->thoff = flow_keys->nhoff;

	/* Refresh data/data_end pointers the program accesses via ctx. */
	bpf_compute_data_pointers((struct sk_buff *)skb);
	result = BPF_PROG_RUN(prog, skb);

	/* Restore state */
	memcpy(cb, &cb_saved, sizeof(cb_saved));

	/* Don't trust offsets written by the program: keep
	 * 0 <= nhoff <= thoff <= skb->len.
	 */
	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, skb->len);

	return result == BPF_OK;
}

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -714,7 +754,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	struct bpf_prog *attached = NULL;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;
@@ -754,53 +793,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	rcu_read_lock();
	if (skb) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog *attached = NULL;

		rcu_read_lock();

		if (skb->dev)
			attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
		else if (skb->sk)
			attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
		else
			WARN_ON_ONCE(1);
	}
	if (attached) {
		/* Note that even though the const qualifier is discarded
		 * throughout the execution of the BPF program, all changes(the
		 * control block) are reverted after the BPF program returns.
		 * Therefore, __skb_flow_dissect does not alter the skb.
		 */
		struct bpf_flow_keys flow_keys = {};
		struct bpf_skb_data_end cb_saved;
		struct bpf_skb_data_end *cb;
		u32 result;

		cb = (struct bpf_skb_data_end *)skb->cb;

		/* Save Control Block */
		memcpy(&cb_saved, cb, sizeof(cb_saved));
		memset(cb, 0, sizeof(cb_saved));

		/* Pass parameters to the BPF program */
		cb->qdisc_cb.flow_keys = &flow_keys;
		flow_keys.nhoff = nhoff;
		flow_keys.thoff = nhoff;

		bpf_compute_data_pointers((struct sk_buff *)skb);
		result = BPF_PROG_RUN(attached, skb);

		/* Restore state */
		memcpy(cb, &cb_saved, sizeof(cb_saved));

		flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
		flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
					  flow_keys.nhoff, skb->len);

		if (attached) {
			ret = __skb_flow_bpf_dissect(attached, skb,
						     flow_dissector,
						     &flow_keys);
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
		return result == BPF_OK;
			return ret;
		}
		rcu_read_unlock();
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
Loading