Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bc7430cc authored by Andrii Nakryiko, committed by Daniel Borkmann
Browse files

selftests/bpf: convert selftests using BTF-defined maps to new syntax



Convert all the existing selftests that are already using BTF-defined
maps to use new syntax (with no static data initialization).

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 00acd008
Loading
Loading
Loading
Loading
+10 −18
Original line number Diff line number Diff line
@@ -58,26 +58,18 @@ struct frag_hdr {
};

struct {
	__u32 type;
	__u32 max_entries;
	__u32 key_size;
	__u32 value_size;
} jmp_table SEC(".maps") = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.max_entries = 8,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
};
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	struct bpf_flow_keys *value;
} last_dissection SEC(".maps") = {
	.type = BPF_MAP_TYPE_ARRAY,
	.max_entries = 1,
};
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct bpf_flow_keys);
} last_dissection SEC(".maps");

static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
					    int ret)
+8 −12
Original line number Diff line number Diff line
@@ -11,20 +11,16 @@
#define NS_PER_SEC	1000000000

struct {
	__u32 type;
	struct bpf_cgroup_storage_key *key;
	struct percpu_net_cnt *value;
} percpu_netcnt SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
};
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, struct percpu_net_cnt);
} percpu_netcnt SEC(".maps");

struct {
	__u32 type;
	struct bpf_cgroup_storage_key *key;
	struct net_cnt *value;
} netcnt SEC(".maps") = {
	.type = BPF_MAP_TYPE_CGROUP_STORAGE,
};
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, struct net_cnt);
} netcnt SEC(".maps");

SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
+5 −8
Original line number Diff line number Diff line
@@ -13,14 +13,11 @@ struct socket_cookie {
};

struct {
	__u32 type;
	__u32 map_flags;
	int *key;
	struct socket_cookie *value;
} socket_cookies SEC(".maps") = {
	.type = BPF_MAP_TYPE_SK_STORAGE,
	.map_flags = BPF_F_NO_PREALLOC,
};
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct socket_cookie);
} socket_cookies SEC(".maps");

SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
+5 −8
Original line number Diff line number Diff line
@@ -21,14 +21,11 @@ struct bpf_map_def SEC("maps") btf_map_legacy = {
BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);

struct {
	int *key;
	struct ipv_counts *value;
	unsigned int type;
	unsigned int max_entries;
} btf_map SEC(".maps") = {
	.type = BPF_MAP_TYPE_ARRAY,
	.max_entries = 4,
};
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, struct ipv_counts);
} btf_map SEC(".maps");

struct dummy_tracepoint_args {
	unsigned long long pad;
+14 −25
Original line number Diff line number Diff line
@@ -16,26 +16,18 @@ struct stack_trace_t {
};

struct {
	__u32 type;
	__u32 max_entries;
	__u32 key_size;
	__u32 value_size;
} perfmap SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.max_entries = 2,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
};
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} perfmap SEC(".maps");

struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	struct stack_trace_t *value;
} stackdata_map SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.max_entries = 1,
};
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct stack_trace_t);
} stackdata_map SEC(".maps");

/* Allocate per-cpu space twice the needed. For the code below
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
@@ -56,14 +48,11 @@ struct {
 * This is an acceptable workaround since there is one entry here.
 */
struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__u64 (*value)[2 * MAX_STACK_RAWTP];
} rawdata_map SEC(".maps") = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.max_entries = 1,
};
} rawdata_map SEC(".maps");

SEC("tracepoint/raw_syscalls/sys_enter")
int bpf_prog1(void *ctx)
Loading