Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 69b45078 authored by Alexei Starovoitov's avatar Alexei Starovoitov
Browse files

Merge branch 'misc-BPF-improvements'



Daniel Borkmann says:

====================
This set adds various patches I still had in my queue, first two
are test cases to provide coverage for the recent two fixes that
went to bpf tree, then a small improvement on the error message
for gpl helpers. Next, we expose prog and map id into fdinfo in
order to allow for inspection of these objects currently used
in applications. Patch after that removes a retpoline call for
map lookup/update/delete helpers. A new helper is added in the
subsequent patch to lookup the skb's socket's cgroup v2 id which
can be used in an efficient way for e.g. lookups on egress side.
Next one is a fix to fully clear state info in tunnel/xfrm helpers.
Given this is full cap_sys_admin from init ns and has same priv
requirements like tracing, bpf-next should be okay. A small bug
fix for bpf_asm follows, and next a fix for context access in
tracing which was recently reported. Lastly, a small update in
the maintainer's file to add patchwork url and missing files.

Thanks!

v2 -> v3:
  - Noticed a merge artefact inside uapi header comment, sigh,
    fixed now.
v1 -> v2:
  - minor fix in getting context access work on 32 bit for tracing
  - add paragraph to uapi helper doc to better describe kernel
    build deps for cgroup helper
====================

Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents 25c1013e 10a76564
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -2722,6 +2722,7 @@ L: netdev@vger.kernel.org
L:	linux-kernel@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
Q:	https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
S:	Supported
F:	arch/x86/net/bpf_jit*
F:	Documentation/networking/filter.txt
@@ -2740,6 +2741,7 @@ F: net/sched/act_bpf.c
F:	net/sched/cls_bpf.c
F:	samples/bpf/
F:	tools/bpf/
F:	tools/lib/bpf/
F:	tools/testing/selftests/bpf/

BROADCOM B44 10/100 ETHERNET DRIVER
+37 −6
Original line number Diff line number Diff line
@@ -289,8 +289,21 @@ struct xdp_buff;
		.off   = OFF,					\
		.imm   = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })

/* Function call */

#define BPF_CAST_CALL(x)					\
		((u64 (*)(u64, u64, u64, u64, u64))(x))

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
@@ -626,16 +639,34 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
	const u32 size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;

	return size;
}

static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
					   u32 size_default)
{
	bool off_ok;
	size_default = bpf_ctx_off_adjust_machine(size_default);
	size_access  = bpf_ctx_off_adjust_machine(size_access);

#ifdef __LITTLE_ENDIAN
	off_ok = (off & (size_default - 1)) == 0;
	return (off & (size_default - 1)) == 0;
#else
	off_ok = (off & (size_default - 1)) + size == size_default;
	return (off & (size_default - 1)) + size_access == size_default;
#endif
	return off_ok && size <= size_default && (size & (size - 1)) == 0;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
	       size <= size_default && (size & (size - 1)) == 0;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+20 −2
Original line number Diff line number Diff line
@@ -2054,6 +2054,22 @@ union bpf_attr {
 *
 *	Return
 *		0
 *
 * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
 * 	Description
 * 		Return the cgroup v2 id of the socket associated with the *skb*.
 * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
 * 		helper for cgroup v1 by providing a tag resp. identifier that
 * 		can be matched on or used for map lookups e.g. to implement
 * 		policy. The cgroup v2 id of a given path in the hierarchy is
 * 		exposed in user space through the f_handle API in order to get
 * 		to the same 64-bit id.
 *
 * 		This helper can be used on TC egress path, but not on ingress,
 * 		and is available only if the kernel was compiled with the
 * 		**CONFIG_SOCK_CGROUP_DATA** configuration option.
 * 	Return
 * 		The id is returned or 0 in case the id could not be retrieved.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
@@ -2134,7 +2150,8 @@ union bpf_attr {
	FN(lwt_seg6_adjust_srh),	\
	FN(lwt_seg6_action),		\
	FN(rc_repeat),			\
	FN(rc_keydown),
	FN(rc_keydown),			\
	FN(skb_cgroup_id),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
@@ -2251,7 +2268,7 @@ struct bpf_tunnel_key {
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u16 tunnel_ext;	/* Padding, future use. */
	__u32 tunnel_label;
};

@@ -2262,6 +2279,7 @@ struct bpf_xfrm_state {
	__u32 reqid;
	__u32 spi;	/* Stored in network byte order */
	__u16 family;
	__u16 ext;	/* Padding, future use. */
	union {
		__u32 remote_ipv4;	/* Stored in network byte order */
		__u32 remote_ipv6[4];	/* Stored in network byte order */
+9 −3
Original line number Diff line number Diff line
@@ -503,7 +503,9 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
@@ -530,7 +532,9 @@ static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
@@ -1369,7 +1373,9 @@ static u32 htab_of_map_gen_lookup(struct bpf_map *map,
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
+8 −4
Original line number Diff line number Diff line
@@ -327,13 +327,15 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);
		   map->pages * 1ULL << PAGE_SHIFT,
		   map->id);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
@@ -1070,11 +1072,13 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id);
}
#endif

Loading