Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 485b7778 authored by David S. Miller
Browse files

Merge branch 'bpf-blinding'



Daniel Borkmann says:

====================
BPF updates

This set implements constant blinding for BPF, first couple of
patches are some preparatory cleanups, followed by the blinding.
Please see individual patches for details.

Thanks a lot!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 553eb544 d93a47f7
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -43,6 +43,17 @@ Values :
	1 - enable the JIT
	2 - enable the JIT and ask the compiler to emit traces on kernel log.

bpf_jit_harden
--------------

This enables hardening for the Berkeley Packet Filter Just in Time compiler.
Supported are eBPF JIT backends. Enabling hardening trades off performance,
but can mitigate JIT spraying.
Values :
	0 - disable JIT hardening (default value)
	1 - enable JIT hardening for unprivileged users only
	2 - enable JIT hardening for all users

dev_weight
--------------

+1 −1
Original line number Diff line number Diff line
@@ -41,7 +41,7 @@ config ARM
	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARM_SMCCC if CPU_V7
	select HAVE_BPF_JIT
	select HAVE_CBPF_JIT
	select HAVE_CC_STACKPROTECTOR
	select HAVE_CONTEXT_TRACKING
	select HAVE_C_RECORDMCOUNT
+1 −1
Original line number Diff line number Diff line
@@ -58,7 +58,7 @@ config ARM64
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_TRACEHOOK
	select HAVE_BPF_JIT
	select HAVE_EBPF_JIT
	select HAVE_C_RECORDMCOUNT
	select HAVE_CC_STACKPROTECTOR
	select HAVE_CMPXCHG_DOUBLE
+41 −15
Original line number Diff line number Diff line
@@ -31,8 +31,8 @@

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
@@ -54,6 +54,8 @@ static const int bpf2a64[] = {
	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
@@ -762,31 +764,45 @@ void bpf_jit_compile(struct bpf_prog *prog)
	/* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;
		return orig_prog;

	if (!prog || !prog->len)
		return;
	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
	if (build_body(&ctx))
		goto out;
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	build_prologue(&ctx);

@@ -797,8 +813,10 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

@@ -809,7 +827,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);
@@ -817,7 +836,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
@@ -829,8 +849,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
out:

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

void bpf_jit_free(struct bpf_prog *prog)
+1 −1
Original line number Diff line number Diff line
@@ -15,7 +15,7 @@ config MIPS
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_TRACEHOOK
	select HAVE_BPF_JIT if !CPU_MICROMIPS
	select HAVE_CBPF_JIT if !CPU_MICROMIPS
	select HAVE_FUNCTION_TRACER
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FTRACE_MCOUNT_RECORD
Loading