
Commit 92595aea authored by David S. Miller

Merge branch 'bpf-fixes'



Daniel Borkmann says:

====================
bpf: couple of fixes

These are two fixes for BPF: one introduces an xmit recursion limiter for
tc BPF programs, and the other rejects invalid filters a bit earlier. For
more details please see the individual patches. I have no strong opinion
on which tree they should go to; they apply to both, but net-next seems
okay to me.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f2a4d086 f7bd9e36
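
As context for the first fix: the limiter follows a standard re-entrancy guard pattern. A per-CPU depth counter is incremented around the transmit call, and once an skb re-enters the xmit path more than XMIT_RECURSION_LIMIT times (e.g. a tc BPF program that keeps redirecting a packet back into the same device), the packet is dropped instead of overflowing the stack. Below is a minimal standalone C sketch of that pattern; the thread-local counter stands in for the kernel's per-CPU xmit_recursion variable, and the names xmit_depth and fake_xmit are illustrative only, not kernel API.

#include <stdio.h>

#define XMIT_RECURSION_LIMIT	10

/* Thread-local stand-in for the kernel's per-CPU xmit_recursion counter. */
static _Thread_local int xmit_depth;

/* Hypothetical xmit path: a "program" that always redirects back into
 * the same device re-enters fake_xmit() until the guard trips. */
static int fake_xmit(int pkt)
{
	int ret;

	if (xmit_depth > XMIT_RECURSION_LIMIT) {
		fprintf(stderr, "drop: recursion limit reached (depth %d)\n",
			xmit_depth);
		return -1;	/* mirrors the kfree_skb() + -ENETDOWN case */
	}

	xmit_depth++;
	ret = fake_xmit(pkt);	/* redirect loops back to ourselves */
	xmit_depth--;

	return ret;
}

int main(void)
{
	/* The "packet" is dropped after XMIT_RECURSION_LIMIT + 1 levels. */
	return fake_xmit(42) == -1 ? 0 : 1;
}

In the patch itself, __bpf_tx_skb() applies exactly this guard around dev_queue_xmit(), using the same per-CPU xmit_recursion counter that __dev_queue_xmit() checks.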
include/linux/netdevice.h +2 −0
@@ -2389,6 +2389,8 @@ void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
 DECLARE_PER_CPU(int, xmit_recursion);
+#define XMIT_RECURSION_LIMIT	10
+
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(xmit_recursion);
net/core/dev.c +2 −4
@@ -3144,8 +3144,6 @@ static void skb_update_prio(struct sk_buff *skb)
 DEFINE_PER_CPU(int, xmit_recursion);
 EXPORT_SYMBOL(xmit_recursion);
 
-#define RECURSION_LIMIT 10
-
 /**
  *	dev_loopback_xmit - loop back @skb
  *	@net: network namespace this loopback is happening in
@@ -3388,8 +3386,8 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
 		if (txq->xmit_lock_owner != cpu) {
-
-			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+			if (unlikely(__this_cpu_read(xmit_recursion) >
+				     XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
 			skb = validate_xmit_skb(skb, dev);
net/core/filter.c +49 −29
@@ -748,6 +748,17 @@ static bool chk_code_allowed(u16 code_to_probe)
 	return codes[code_to_probe];
 }
 
+static bool bpf_check_basics_ok(const struct sock_filter *filter,
+				unsigned int flen)
+{
+	if (filter == NULL)
+		return false;
+	if (flen == 0 || flen > BPF_MAXINSNS)
+		return false;
+
+	return true;
+}
+
 /**
  *	bpf_check_classic - verify socket filter code
  *	@filter: filter to verify
@@ -768,9 +779,6 @@ static int bpf_check_classic(const struct sock_filter *filter,
 	bool anc_found;
 	int pc;
 
-	if (flen == 0 || flen > BPF_MAXINSNS)
-		return -EINVAL;
-
 	/* Check the filter code now */
 	for (pc = 0; pc < flen; pc++) {
 		const struct sock_filter *ftest = &filter[pc];
@@ -1065,7 +1073,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 	struct bpf_prog *fp;
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return -EINVAL;
 
 	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1112,7 +1120,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 	int err;
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return -EINVAL;
 
 	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1207,7 +1215,6 @@ static
 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
-	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
 	struct bpf_prog *prog;
 	int err;
 
@@ -1215,10 +1222,10 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 		return ERR_PTR(-EPERM);
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return ERR_PTR(-EINVAL);
 
-	prog = bpf_prog_alloc(bpf_fsize, 0);
+	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 	if (!prog)
 		return ERR_PTR(-ENOMEM);
 
@@ -1603,9 +1610,36 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+
+	return dev_forward_skb(dev, skb);
+}
+
+static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	int ret;
+
+	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+		kfree_skb(skb);
+		return -ENETDOWN;
+	}
+
+	skb->dev = dev;
+
+	__this_cpu_inc(xmit_recursion);
+	ret = dev_queue_xmit(skb);
+	__this_cpu_dec(xmit_recursion);
+
+	return ret;
+}
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	struct net_device *dev;
 
 	if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1615,19 +1649,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!dev))
 		return -EINVAL;
 
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (unlikely(!skb2))
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
 		return -ENOMEM;
 
-	if (flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb2))
-			skb_postpush_rcsum(skb2, skb_mac_header(skb2),
-					   skb2->mac_len);
-		return dev_forward_skb(dev, skb2);
-	}
-
-	skb2->dev = dev;
-	return dev_queue_xmit(skb2);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1671,15 +1698,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	if (ri->flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb))
-			skb_postpush_rcsum(skb, skb_mac_header(skb),
-					   skb->mac_len);
-		return dev_forward_skb(dev, skb);
-	}
-
-	skb->dev = dev;
-	return dev_queue_xmit(skb);
+	return ri->flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {