Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8ed70267 authored by Willem de Bruijn, committed by Greg Kroah-Hartman
Browse files

bpf: support SKF_NET_OFF and SKF_LL_OFF on skb frags

[ Upstream commit d4bac0288a2b444e468e6df9cb4ed69479ddf14a ]

Classic BPF socket filters with SKF_NET_OFF and SKF_LL_OFF fail to
read when these offsets extend into frags.

This has been observed with iwlwifi and reproduced with tun with
IFF_NAPI_FRAGS. The below straightforward socket filter on UDP port,
applied to a RAW socket, will silently miss matching packets.

    const int offset_proto = offsetof(struct ip6_hdr, ip6_nxt);
    const int offset_dport = sizeof(struct ip6_hdr) + offsetof(struct udphdr, dest);
    struct sock_filter filter_code[] = {
            BPF_STMT(BPF_LD  + BPF_B   + BPF_ABS, SKF_AD_OFF + SKF_AD_PKTTYPE),
            BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, PACKET_HOST, 0, 4),
            BPF_STMT(BPF_LD  + BPF_B   + BPF_ABS, SKF_NET_OFF + offset_proto),
            BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 2),
            BPF_STMT(BPF_LD  + BPF_H   + BPF_ABS, SKF_NET_OFF + offset_dport),

This is unexpected behavior. Socket filter programs should be
consistent regardless of environment. Silent misses are
particularly concerning as hard to detect.

Use skb_copy_bits for offsets outside linear, same as done for
non-SKF_(LL|NET) offsets.

Offset is always positive after subtracting the reference threshold
SKF_(LL|NET)_OFF, so is always >= skb_(mac|network)_offset. The sum of
the two is an offset against skb->data, and may be negative, but it
cannot point before skb->head, as skb_(mac|network)_offset would too.

This appears to go back to when frag support was introduced to
sk_run_filter in linux-2.4.4, before the introduction of git.

The amount of code change and 8/16/32 bit duplication are unfortunate.
But any attempt I made to be smarter saved very few LoC while
complicating the code.

Fixes: 1da177e4 ("Linux-2.6.12-rc2")
Link: https://lore.kernel.org/netdev/20250122200402.3461154-1-maze@google.com/
Link: https://elixir.bootlin.com/linux/2.4.4/source/net/core/filter.c#L244


Reported-by: Matt Moeller <moeller.matt@gmail.com>
Co-developed-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://lore.kernel.org/r/20250408132833.195491-2-willemdebruijn.kernel@gmail.com


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent a74e04de
Loading
Loading
Loading
Loading
+44 −36
Original line number Diff line number Diff line
@@ -175,23 +175,35 @@ BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
	return 0;
}

/* Translate a classic-BPF load offset into an offset relative to skb->data.
 *
 * Non-negative offsets are returned unchanged. Negative offsets encode the
 * ancillary SKF_NET_OFF / SKF_LL_OFF bases; they are rebased onto the
 * network or mac header offset respectively. The check order matters:
 * both macros are negative thresholds and SKF_NET_OFF > SKF_LL_OFF, so the
 * SKF_NET_OFF range must be tested first.
 *
 * Returns INT_MIN when the offset is negative but not a resolvable
 * ancillary offset (e.g. SKF_LL_OFF without a mac header set); callers
 * treat that sentinel as -EFAULT.
 */
static int bpf_skb_load_helper_convert_offset(const struct sk_buff *skb, int offset)
{
	if (likely(offset >= 0))
		return offset;

	if (offset >= SKF_NET_OFF)
		return offset - SKF_NET_OFF + skb_network_offset(skb);

	/* Link-layer base is only meaningful once the mac header is set. */
	if (offset >= SKF_LL_OFF && skb_mac_header_was_set(skb))
		return offset - SKF_LL_OFF + skb_mac_offset(skb);

	return INT_MIN;
}

/* Load one byte from @skb at @offset (possibly an SKF_* ancillary offset).
 *
 * @data/@headlen describe the linear head. After normalizing the offset,
 * read directly from the linear area when it covers the byte; otherwise
 * fall back to skb_copy_bits() so reads that extend into frags succeed.
 * Returns the byte value, or -EFAULT on an unresolvable/out-of-range offset.
 */
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp;
	const int len = sizeof(tmp);

	offset = bpf_skb_load_helper_convert_offset(skb, offset);
	if (offset == INT_MIN)
		return -EFAULT;

	if (headlen - offset >= len)
		return *(u8 *)(data + offset);
	if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
		return tmp;

	return -EFAULT;
}

@@ -205,20 +217,18 @@ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
/* Load a big-endian 16-bit value from @skb at @offset (possibly SKF_*).
 *
 * Same scheme as bpf_skb_load_helper_8: normalize the offset, use an
 * unaligned read from the linear head when it covers both bytes, else
 * skb_copy_bits() so loads spanning into frags still succeed.
 * Returns the host-order value, or -EFAULT on failure.
 */
BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	__be16 tmp;
	const int len = sizeof(tmp);

	offset = bpf_skb_load_helper_convert_offset(skb, offset);
	if (offset == INT_MIN)
		return -EFAULT;

	if (headlen - offset >= len)
		return get_unaligned_be16(data + offset);
	if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
		return be16_to_cpu(tmp);

	return -EFAULT;
}

@@ -232,20 +242,18 @@ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
/* Load a big-endian 32-bit value from @skb at @offset (possibly SKF_*).
 *
 * Same scheme as the 8/16-bit helpers: normalize the offset, use an
 * unaligned read from the linear head when it covers all four bytes, else
 * skb_copy_bits() so loads spanning into frags still succeed.
 * Returns the host-order value, or -EFAULT on failure.
 */
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	__be32 tmp;
	const int len = sizeof(tmp);

	offset = bpf_skb_load_helper_convert_offset(skb, offset);
	if (offset == INT_MIN)
		return -EFAULT;

	if (headlen - offset >= len)
		return get_unaligned_be32(data + offset);
	if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
		return be32_to_cpu(tmp);

	return -EFAULT;
}