Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a2b4eb55 authored by David S. Miller
Browse files

Merge branch 'bpf-misc'



Daniel Borkmann says:

====================
Misc BPF improvements

This last series for this window adds various misc
improvements to BPF, one is to mark registered map and
prog types as __ro_after_init, another one for removing
cBPF stubs in eBPF JITs and moving the stub to the core
and last also improving JITs is to make generated images
visible to the kernel and kallsyms, so they can be
seen in traces. For details, please have a look at the
individual patches.

Thanks a lot!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents afcb50ba 74451e66
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -54,6 +54,18 @@ Values :
	1 - enable JIT hardening for unprivileged users only
	2 - enable JIT hardening for all users

bpf_jit_kallsyms
----------------

When Berkeley Packet Filter Just in Time compiler is enabled, then compiled
images are unknown addresses to the kernel, meaning they neither show up in
traces nor in /proc/kallsyms. This enables export of these addresses, which
can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature
is disabled.
Values :
	0 - disable JIT kallsyms export (default value)
	1 - enable JIT kallsyms export for privileged users only

dev_weight
--------------

+0 −20
Original line number Diff line number Diff line
@@ -813,11 +813,6 @@ static inline void bpf_flush_icache(void *start, void *end)
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

/*
 * Classic BPF entry point — intentionally empty on this arch.
 * cBPF programs are first converted to eBPF and then JITed through
 * bpf_int_jit_compile(), so there is nothing to do here.
 */
void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
@@ -915,18 +910,3 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
					   tmp : orig_prog);
	return prog;
}

/*
 * Release a bpf_prog and, if it was JITed, its generated image.
 * The binary header sits at the start of the page containing the
 * image, so it is recovered by masking bpf_func down with PAGE_MASK.
 */
void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	/* Non-JITed programs carry no binary image to free. */
	if (!prog->jited)
		goto free_filter;

	/* NOTE(review): image pages are presumably mapped read-only after
	 * JITing (see the powerpc comment about not setting images RO), so
	 * they must be made writable before freeing — confirm against the
	 * arch's set_memory_ro() usage.
	 */
	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}
+1 −2
Original line number Diff line number Diff line
@@ -961,8 +961,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
	return 0;
}

/* Classic BPF stub: cBPF is converted to eBPF and JITed via
 * bpf_int_jit_compile() instead, so nothing to do here.
 */
void bpf_jit_compile(struct bpf_prog *fp) { }

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
@@ -1066,6 +1064,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+0 −26
Original line number Diff line number Diff line
@@ -1262,14 +1262,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
	return 0;
}

/*
 * Classic BPF function stub. BPF programs will be converted into
 * eBPF and then bpf_int_jit_compile() will be called.
 */
void bpf_jit_compile(struct bpf_prog *fp)
{
	/* Intentionally empty. */
}

/*
 * Compile eBPF program "fp"
 */
@@ -1347,21 +1339,3 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
					   tmp : orig_fp);
	return fp;
}

/*
 * Free eBPF program.
 *
 * The bpf_binary_header lives at the start of the page containing the
 * JITed image; it is located by masking bpf_func with PAGE_MASK.
 * Non-JITed programs skip straight to freeing the bpf_prog itself.
 */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	/* Re-enable write access on the image pages before releasing them.
	 * NOTE(review): assumes the JIT set them read-only — verify.
	 */
	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}
+2 −21
Original line number Diff line number Diff line
@@ -1067,13 +1067,13 @@ xadd: if (is_imm8(insn->off))

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
@@ -1085,10 +1085,6 @@ xadd: if (is_imm8(insn->off))
	return proglen;
}

/*
 * Classic BPF stub — empty on purpose: cBPF programs are translated to
 * eBPF first, and all JITing happens in bpf_int_jit_compile().
 */
void bpf_jit_compile(struct bpf_prog *prog)
{
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
@@ -1184,18 +1180,3 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
					   tmp : orig_prog);
	return prog;
}

/*
 * Free a bpf_prog together with its JITed image, if any. The binary
 * header is found by rounding the image address down to its page
 * boundary with PAGE_MASK.
 */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	/* Nothing JITed — only the prog structure needs freeing. */
	if (!fp->jited)
		goto free_filter;

	/* Make the image pages writable again before handing them back.
	 * NOTE(review): presumably they were set read-only at JIT time —
	 * confirm against this arch's set_memory_ro() call.
	 */
	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}
Loading