Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d73e5f41 authored by David S. Miller
Browse files

Merge branch 'bpf-misc-updates'



Daniel Borkmann says:

====================
Misc BPF updates

This series contains a couple of miscellaneous updates to the BPF code — among
other things, a new helper bpf_skb_load_bytes() and moving the clearing of A/X
to the classic converter. Please see the individual patches for details.

Thanks!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 59ce9670 9dd2af83
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -152,8 +152,6 @@ static void build_prologue(struct jit_ctx *ctx)
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ra = bpf2a64[BPF_REG_A];
	const u8 rx = bpf2a64[BPF_REG_X];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

@@ -200,10 +198,6 @@ static void build_prologue(struct jit_ctx *ctx)

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Clear registers A and X */
	emit_a64_mov_i64(ra, 0, ctx);
	emit_a64_mov_i64(rx, 0, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
+2 −11
Original line number Diff line number Diff line
@@ -408,7 +408,7 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 * Save registers and create stack frame if necessary.
 * See stack frame layout desription in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
static void bpf_jit_prologue(struct bpf_jit *jit)
{
	if (jit->seen & SEEN_TAIL_CALL) {
		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -448,15 +448,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
			      STK_OFF_SKBP);
	/* Clear A (%b0) and X (%b7) registers for converted BPF programs */
	if (is_classic) {
		if (REG_SEEN(BPF_REG_A))
			/* lghi %ba,0 */
			EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
		if (REG_SEEN(BPF_REG_X))
			/* lghi %bx,0 */
			EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
	}
}

/*
@@ -1245,7 +1236,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
	jit->lit = jit->lit_start;
	jit->prg = 0;

	bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
	bpf_jit_prologue(jit);
	for (i = 0; i < fp->len; i += insn_count) {
		insn_count = bpf_jit_insn(jit, fp, i);
		if (insn_count < 0)
+35 −5
Original line number Diff line number Diff line
@@ -193,7 +193,7 @@ struct jit_context {
	 32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 51
#define PROLOGUE_SIZE 48

/* emit x64 prologue code for BPF program and check it's size.
 * bpf_tail_call helper will skip it while jumping into another program
@@ -229,11 +229,15 @@ static void emit_prologue(u8 **pprog)
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions,
	 * resetting rax register to 0 (xor on eax gets 0 extended), and
	 * moving it to the counter location.
	 */

	/* clear tail_cnt: mov qword ptr [rbp-X], rax */
	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp-X], rax */
	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
@@ -455,6 +459,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
@@ -469,6 +485,20 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
				return -EINVAL;
			}

			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
+1 −0
Original line number Diff line number Diff line
@@ -269,6 +269,7 @@ enum bpf_func_id {
	 * Return: 0 on success
	 */
	BPF_FUNC_perf_event_output,
	BPF_FUNC_skb_load_bytes,
	__BPF_FUNC_MAX_ID,
};

+0 −4
Original line number Diff line number Diff line
@@ -306,10 +306,6 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

Loading