Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c21a2a6 authored by David S. Miller
Browse files

Merge branch 'bpf-stack-tracker'



Alexei Starovoitov says:

====================
bpf: stack depth tracking

Introduce tracking of bpf program stack depth in the verifier and use that
info to reduce bpf program stack consumption in the interpreter and x64 JIT.
Other JITs can take advantage of it as well in the future.
Most of the programs consume very little stack, so it's good optimization
in general and it's the first step toward bpf to bpf function calls.

Also use internal opcode for bpf_tail_call() marking to make clear
that jmp|call|x opcode is not uapi and may be used for actual
indirect call opcode in the future.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents d2e0ef49 2960ae48
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -586,7 +586,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
		break;
		break;
	}
	}
	/* tail call */
	/* tail call */
	case BPF_JMP | BPF_CALL | BPF_X:
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
			return -EFAULT;
		break;
		break;
+1 −1
Original line number Original line Diff line number Diff line
@@ -938,7 +938,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
		/*
		/*
		 * Tail call
		 * Tail call
		 */
		 */
		case BPF_JMP | BPF_CALL | BPF_X:
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;
			break;
+1 −1
Original line number Original line Diff line number Diff line
@@ -991,7 +991,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
		}
		}
		break;
		break;
	}
	}
	case BPF_JMP | BPF_CALL | BPF_X:
	case BPF_JMP | BPF_TAIL_CALL:
		/*
		/*
		 * Implicit input:
		 * Implicit input:
		 *  B1: pointer to ctx
		 *  B1: pointer to ctx
+1 −1
Original line number Original line Diff line number Diff line
@@ -1217,7 +1217,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
	}
	}


	/* tail call */
	/* tail call */
	case BPF_JMP | BPF_CALL |BPF_X:
	case BPF_JMP | BPF_TAIL_CALL:
		emit_tail_call(ctx);
		emit_tail_call(ctx);
		break;
		break;


+9 −11
Original line number Original line Diff line number Diff line
@@ -19,9 +19,6 @@
 */
 */
#define SKBDATA	%r10
#define SKBDATA	%r10
#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
#define MAX_BPF_STACK (512 /* from filter.h */ + \
	32 /* space for rbx,r13,r14,r15 */ + \
	8 /* space for skb_copy_bits */)


#define FUNC(name) \
#define FUNC(name) \
	.globl name; \
	.globl name; \
@@ -66,7 +63,7 @@ FUNC(sk_load_byte_positive_offset)


/* rsi contains offset and can be scratched */
/* rsi contains offset and can be scratched */
#define bpf_slow_path_common(LEN)		\
#define bpf_slow_path_common(LEN)		\
	lea	-MAX_BPF_STACK + 32(%rbp), %rdx;\
	lea	32(%rbp), %rdx;\
	FRAME_BEGIN;				\
	FRAME_BEGIN;				\
	mov	%rbx, %rdi; /* arg1 == skb */	\
	mov	%rbx, %rdi; /* arg1 == skb */	\
	push	%r9;				\
	push	%r9;				\
@@ -83,14 +80,14 @@ FUNC(sk_load_byte_positive_offset)
bpf_slow_path_word:
bpf_slow_path_word:
	bpf_slow_path_common(4)
	bpf_slow_path_common(4)
	js	bpf_error
	js	bpf_error
	mov	- MAX_BPF_STACK + 32(%rbp),%eax
	mov	32(%rbp),%eax
	bswap	%eax
	bswap	%eax
	ret
	ret


bpf_slow_path_half:
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bpf_slow_path_common(2)
	js	bpf_error
	js	bpf_error
	mov	- MAX_BPF_STACK + 32(%rbp),%ax
	mov	32(%rbp),%ax
	rol	$8,%ax
	rol	$8,%ax
	movzwl	%ax,%eax
	movzwl	%ax,%eax
	ret
	ret
@@ -98,7 +95,7 @@ bpf_slow_path_half:
bpf_slow_path_byte:
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bpf_slow_path_common(1)
	js	bpf_error
	js	bpf_error
	movzbl	- MAX_BPF_STACK + 32(%rbp),%eax
	movzbl	32(%rbp),%eax
	ret
	ret


#define sk_negative_common(SIZE)				\
#define sk_negative_common(SIZE)				\
@@ -148,9 +145,10 @@ FUNC(sk_load_byte_negative_offset)
bpf_error:
bpf_error:
# force a return 0 from jit handler
# force a return 0 from jit handler
	xor	%eax,%eax
	xor	%eax,%eax
	mov	- MAX_BPF_STACK(%rbp),%rbx
	mov	(%rbp),%rbx
	mov	- MAX_BPF_STACK + 8(%rbp),%r13
	mov	8(%rbp),%r13
	mov	- MAX_BPF_STACK + 16(%rbp),%r14
	mov	16(%rbp),%r14
	mov	- MAX_BPF_STACK + 24(%rbp),%r15
	mov	24(%rbp),%r15
	add	$40, %rbp
	leaveq
	leaveq
	ret
	ret
Loading