Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1d9423ae authored by David S. Miller
Browse files

Merge branch 'bpf-direct-packet-access-improvements'



Daniel Borkmann says:

====================
BPF direct packet access improvements

This set adds write support to the currently available read support
for {cls,act}_bpf programs. First one is a fix for affected commit
sitting in net-next and prerequisite for the second one, last patch
adds a number of test cases against the verifier. For details, please
see individual patches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents cf714ac1 7d95b0ab
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
@@ -151,7 +152,8 @@ struct bpf_verifier_ops {
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				enum bpf_reg_type *reg_type);

	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
				  int src_reg, int ctx_off,
				  struct bpf_insn *insn, struct bpf_prog *prog);
+12 −2
Original line number Diff line number Diff line
@@ -676,13 +676,23 @@ struct sk_buff {
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1;
	/* one bit hole */
				xmit_more:1,
				__unused:1; /* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
+21 −0
Original line number Diff line number Diff line
@@ -398,6 +398,27 @@ enum bpf_func_id {
	 */
	BPF_FUNC_skb_change_tail,

	/**
	 * bpf_skb_pull_data(skb, len)
	 * The helper will pull in non-linear data in case the
	 * skb is non-linear and not all of len are part of the
	 * linear section. Only needed for read/write with direct
	 * packet access.
	 * @skb: pointer to skb
	 * @len: len to make read/writeable
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_skb_pull_data,

	/**
	 * bpf_csum_update(skb, csum)
	 * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
	 * @skb: pointer to skb
	 * @csum: csum to add
	 * Return: csum on success or negative error
	 */
	BPF_FUNC_csum_update,

	__BPF_FUNC_MAX_ID,
};

+3 −0
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@ BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
@@ -51,6 +52,7 @@ BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
@@ -67,6 +69,7 @@ BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
+41 −15
Original line number Diff line number Diff line
@@ -196,6 +196,7 @@ struct verifier_env {
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool seen_direct_write;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	65536
@@ -204,6 +205,7 @@ struct verifier_env {
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
};
@@ -654,10 +656,17 @@ static int check_map_access(struct verifier_env *env, u32 regno, int off,

#define MAX_PACKET_OFF 0xffff

static bool may_write_pkt_data(enum bpf_prog_type type)
static bool may_access_direct_pkt_data(struct verifier_env *env,
				       const struct bpf_call_arg_meta *meta)
{
	switch (type) {
	switch (env->prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;
	default:
		return false;
@@ -671,7 +680,7 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
	struct reg_state *reg = &regs[regno];

	off += reg->off;
	if (off < 0 || off + size > reg->range) {
	if (off < 0 || size <= 0 || off + size > reg->range) {
		verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
@@ -817,7 +826,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			err = check_stack_read(state, off, size, value_regno);
		}
	} else if (state->regs[regno].type == PTR_TO_PACKET) {
		if (t == BPF_WRITE && !may_write_pkt_data(env->prog->type)) {
		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL)) {
			verbose("cannot write into packet\n");
			return -EACCES;
		}
@@ -950,8 +959,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
		return 0;
	}

	if (type == PTR_TO_PACKET && !may_write_pkt_data(env->prog->type)) {
		verbose("helper access to the packet is not allowed for clsact\n");
	if (type == PTR_TO_PACKET && !may_access_direct_pkt_data(env, meta)) {
		verbose("helper access to the packet is not allowed\n");
		return -EACCES;
	}

@@ -1191,6 +1200,7 @@ static int check_call(struct verifier_env *env, int func_id)
	changes_data = bpf_helper_changes_skb_data(fn->func);

	memset(&meta, 0, sizeof(meta));
	meta.pkt_access = fn->pkt_access;

	/* We only support one arg being in raw mode at the moment, which
	 * is sufficient for the helper functions we have right now.
@@ -2675,18 +2685,35 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16];
	const struct bpf_verifier_ops *ops = env->prog->aux->ops;
	struct bpf_insn insn_buf[16], *insn;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	int i;
	int i, insn_cnt, cnt;

	if (!env->prog->aux->ops->convert_ctx_access)
	if (ops->gen_prologue) {
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
					env->prog);
		if (cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		} else if (cnt) {
			new_prog = bpf_patch_insn_single(env->prog, 0,
							 insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;
			env->prog = new_prog;
		}
	}

	if (!ops->convert_ctx_access)
		return 0;

	insn_cnt = env->prog->len;
	insn = env->prog->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u32 insn_delta, cnt;
		u32 insn_delta;

		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
@@ -2703,8 +2730,7 @@ static int convert_ctx_accesses(struct verifier_env *env)
			continue;
		}

		cnt = env->prog->aux->ops->
			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
		cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
					      insn->off, insn_buf, env->prog);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
Loading