Commit 407be8d0 authored by Alexei Starovoitov

Merge branch 'narrow-loads'



Andrey Ignatov says:

====================
This patch set adds support for narrow loads with offset > 0 to the BPF
verifier.

Patch 1 provides more details and is the main patch in the set.
Patches 2 and 3 add new test cases to test_verifier and test_sock_addr
selftests.

v1->v2:
- fix -Wdeclaration-after-statement warning.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents f2cbf958 e7605475
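
A "narrow load" is a context-field read that is smaller than the field itself; this series additionally lets such a read start at a non-zero byte offset within the field. As a minimal sketch of what that means at the instruction level (using the BPF_* instruction macros available to the kernel and its selftests; this mirrors the new test_verifier cases below rather than being part of the commit), consider a one-byte read of the third byte of skb->hash:

/* Sketch: a 1-byte load at offset 2 within the 4-byte hash field.
 * Before this series the verifier rejected it with
 * "invalid bpf_context access"; with the series applied it is accepted
 * and rewritten as shown in the verifier.c hunk further down.
 */
struct bpf_insn narrow_load_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
	BPF_EXIT_INSN(),
};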
include/linux/filter.h  +1 −15
@@ -668,24 +668,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
 	return size;
 }
 
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
-					   u32 size_default)
-{
-	size_default = bpf_ctx_off_adjust_machine(size_default);
-	size_access  = bpf_ctx_off_adjust_machine(size_access);
-
-#ifdef __LITTLE_ENDIAN
-	return (off & (size_default - 1)) == 0;
-#else
-	return (off & (size_default - 1)) + size_access == size_default;
-#endif
-}
-
 static inline bool
 bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 {
-	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
-	       size <= size_default && (size & (size - 1)) == 0;
+	return size <= size_default && (size & (size - 1)) == 0;
 }
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
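
The deleted bpf_ctx_narrow_align_ok() is what used to reject offsets inside a field: on little-endian hosts it only accepted accesses starting at byte 0 of the (machine-word-adjusted) field. A standalone before/after illustration of the predicate, with the bpf_ctx_off_adjust_machine() rounding omitted for brevity (a simplified sketch, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Old little-endian rule: power-of-two size, no larger than the field,
 * and the access had to start at byte 0 of the field.
 */
static bool narrow_ok_old_le(unsigned int off, unsigned int size,
			     unsigned int size_default)
{
	return (off & (size_default - 1)) == 0 &&
	       size <= size_default && (size & (size - 1)) == 0;
}

/* New rule: only the size is checked here; the offset is dealt with when
 * the access is converted (see the verifier.c hunk below).
 */
static bool narrow_ok_new(unsigned int off, unsigned int size,
			  unsigned int size_default)
{
	(void)off;
	return size <= size_default && (size & (size - 1)) == 0;
}

int main(void)
{
	/* 1-byte load at byte 2 of a 4-byte field: rejected before, allowed now. */
	printf("old=%d new=%d\n", narrow_ok_old_le(2, 1, 4), narrow_ok_new(2, 1, 4));
	return 0;
}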
kernel/bpf/verifier.c  +16 −5
@@ -5718,10 +5718,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	int i, cnt, size, ctx_field_size, delta = 0;
 	const int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16], *insn;
+	u32 target_size, size_default, off;
 	struct bpf_prog *new_prog;
 	enum bpf_access_type type;
 	bool is_narrower_load;
-	u32 target_size;
 
 	if (ops->gen_prologue || env->seen_direct_write) {
 		if (!ops->gen_prologue) {
@@ -5814,9 +5814,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		 * we will apply proper mask to the result.
 		 */
 		is_narrower_load = size < ctx_field_size;
+		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
+		off = insn->off;
 		if (is_narrower_load) {
-			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
-			u32 off = insn->off;
 			u8 size_code;
 
 			if (type == BPF_WRITE) {
@@ -5844,13 +5844,24 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		}
 
 		if (is_narrower_load && size < target_size) {
-			if (ctx_field_size <= 4)
+			u8 shift = (off & (size_default - 1)) * 8;
+
+			if (ctx_field_size <= 4) {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
 								(1 << size * 8) - 1);
-			else
+			} else {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
 								(1 << size * 8) - 1);
+			}
 		}
 
 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 		if (!new_prog)
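
In other words, the verifier still loads the whole field and then appends a right shift by (off & (size_default - 1)) * 8 bits before the existing mask, so the requested bytes land in the low bits of the destination register. A small C sketch of the run-time effect on a little-endian host (narrow_read() is a hypothetical helper for illustration, not a kernel function):

#include <stdint.h>

/* Semantics of the rewritten access for a size_default of 4 bytes:
 * full-width load, then BPF_RSH by the byte offset * 8, then BPF_AND
 * down to the access size.
 */
static uint32_t narrow_read(uint32_t field, unsigned int off, unsigned int size)
{
	unsigned int shift = (off & (4 - 1)) * 8;
	uint32_t mask = size < 4 ? (1u << (size * 8)) - 1 : ~0u;

	return (field >> shift) & mask;
}

For example, a 1-byte read at byte 2 of a 4-byte field becomes (field >> 16) & 0xff, which is exactly the BPF_RSH/BPF_AND pair emitted above.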
tools/testing/selftests/bpf/test_sock_addr.c  +24 −4
@@ -574,24 +574,44 @@ static int bind4_prog_load(const struct sock_addr_test *test)
 		/* if (sk.family == AF_INET && */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, family)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 16),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 24),
 
 		/*     (sk.type == SOCK_DGRAM || sk.type == SOCK_STREAM) && */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, type)),
 		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 1),
 		BPF_JMP_A(1),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 12),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 20),
 
 		/*     1st_byte_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 10),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 18),
+
+		/*     2nd_byte_of_user_ip4 == expected && */
+		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_ip4) + 1),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[1], 16),
+
+		/*     3rd_byte_of_user_ip4 == expected && */
+		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_ip4) + 2),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[2], 14),
+
+		/*     4th_byte_of_user_ip4 == expected && */
+		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_ip4) + 3),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[3], 12),
 
 		/*     1st_half_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 8),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 10),
+
+		/*     2nd_half_of_user_ip4 == expected && */
+		BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_ip4) + 2),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[1], 8),
 
 		/*     whole_user_ip4 == expected) { */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
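
The new assembler checks read user_ip4 one byte (BPF_B) and one halfword (BPF_H) at a time at increasing offsets. Roughly the same checks in restricted C would look like the sketch below; this is illustrative only (the selftest uses hand-written instructions, the expected address is hard-coded here to 127.0.0.1, and whether the compiler emits exactly these 1-byte context loads depends on the BPF backend):

#include <linux/types.h>
#include <linux/bpf.h>

/* Sketch: byte-granular reads of bpf_sock_addr.user_ip4 (network byte
 * order), the C-level counterpart of the BPF_B loads added above.
 */
int bind4_narrow_checks(struct bpf_sock_addr *ctx)
{
	volatile __u8 *b = (volatile __u8 *)&ctx->user_ip4;

	if (b[0] != 127 || b[1] != 0 || b[2] != 0 || b[3] != 1)
		return 0;	/* reject the bind() */
	return 1;		/* allow it */
}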
tools/testing/selftests/bpf/test_verifier.c  +38 −10
@@ -2026,29 +2026,27 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
-		"check skb->hash byte load not permitted 1",
+		"check skb->hash byte load permitted 1",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 1),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
+		.result = ACCEPT,
 	},
 	{
-		"check skb->hash byte load not permitted 2",
+		"check skb->hash byte load permitted 2",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 2),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
+		.result = ACCEPT,
 	},
 	{
-		"check skb->hash byte load not permitted 3",
+		"check skb->hash byte load permitted 3",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 #if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2060,8 +2058,7 @@ static struct bpf_test tests[] = {
 #endif
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
+		.result = ACCEPT,
 	},
 	{
 		"check cb access: byte, wrong type",
@@ -2173,7 +2170,7 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
-		"check skb->hash half load not permitted",
+		"check skb->hash half load permitted 2",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 #if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2182,6 +2179,37 @@ static struct bpf_test tests[] = {
 #else
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #endif
 			BPF_EXIT_INSN(),
 		},
+		.result = ACCEPT,
+	},
+	{
+		"check skb->hash half load not permitted, unaligned 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 1),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 3),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check skb->hash half load not permitted, unaligned 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 3),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 1),
+#endif
+			BPF_EXIT_INSN(),
+		},
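
Taken together, the skb->hash half-load tests show what is and is not allowed after the change: 2-byte loads at hash+0 and hash+2 are accepted, while the unaligned ones at hash+1 and hash+3 are still rejected, presumably because the context is_valid_access callbacks keep requiring the offset to be a multiple of the access size. A small sketch of that rule under this assumption (not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Assumed rule for 2-byte loads from the 4-byte hash field: power-of-two
 * size within the field, naturally aligned offset, access fully inside
 * the field.
 */
static bool half_load_ok(unsigned int off)
{
	const unsigned int size = 2, size_default = 4;

	return size <= size_default && (size & (size - 1)) == 0 &&
	       off % size == 0 && off + size <= size_default;
}

int main(void)
{
	for (unsigned int off = 0; off < 4; off++)
		printf("hash+%u: %s\n", off, half_load_ok(off) ? "ACCEPT" : "REJECT");
	return 0;
}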