Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 60005c60 authored by David S. Miller
Browse files

Merge branch 'bpf-next'



Daniel Borkmann says:

====================
BPF updates

[ Set applies on top of current net-next but also on top of
  Alexei's latest patches. Please see individual patches for
  more details. ]

Changelog:
 v1->v2:
  - Removed paragraph in 1st commit message
  - Rest stays the same
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ca777eff 286aad3c
Loading
Loading
Loading
Loading
+27 −7
Original line number Diff line number Diff line
@@ -12,7 +12,6 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -174,6 +173,15 @@ static inline bool is_load_to_a(u16 inst)
	}
}

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Pad the hole with illegal UND instruction encodings so that a
	 * stray jump into unused JIT space traps instead of executing
	 * leftover data.
	 */
	const u32 undef_ins = 0xe7ffffff;
	u32 *slot = area;

	/* Caller guarantees the area is u32-aligned; fill whole words,
	 * leaving any sub-word remainder (< 4 bytes) untouched.
	 */
	while (size >= sizeof(u32)) {
		*slot++ = undef_ins;
		size -= sizeof(u32);
	}
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
@@ -859,9 +867,11 @@ static int build_body(struct jit_ctx *ctx)

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;
@@ -897,13 +907,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (unlikely(ctx.target == NULL))
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);
@@ -919,8 +931,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;
	fp->jited = true;
out:
	kfree(ctx.offsets);
	return;
@@ -928,8 +941,15 @@ void bpf_jit_compile(struct bpf_prog *fp)

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}
+1 −1
Original line number Diff line number Diff line
@@ -1417,7 +1417,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;
	fp->jited = true;

out:
	kfree(ctx.offsets);
+1 −1
Original line number Diff line number Diff line
@@ -686,7 +686,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
		fp->jited = 1;
		fp->jited = true;
	}
out:
	kfree(addrs);
+10 −37
Original line number Diff line number Diff line
@@ -5,11 +5,9 @@
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <linux/random.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -148,6 +146,12 @@ struct bpf_jit {
	ret;						\
})

static void bpf_jit_fill_hole(void *area, unsigned int size)
{
	/* Clear the whole area: zero bytes decode as illegal
	 * instructions here, so the padding traps if ever executed.
	 */
	unsigned char *byte = area;

	while (size-- > 0)
		*byte++ = 0;
}

static void bpf_jit_prologue(struct bpf_jit *jit)
{
	/* Save registers and create stack frame if necessary */
@@ -780,38 +784,6 @@ load_abs: if ((int) K < 0)
	return -1;
}

/*
 * Note: for security reasons, bpf code will follow a randomly
 *	 sized amount of illegal instructions.
 */
struct bpf_binary_header {
	unsigned int pages;	/* size of this allocation in whole pages */
	u8 image[];		/* flexible array: padding + JITed code */
};

/* Allocate executable memory for a JITed program of @bpfsize bytes.
 * Returns the page-aligned header (what the caller must eventually free)
 * and, via @image_ptr, the randomized start address for the JITed code
 * inside the allocation.  Returns NULL if module_alloc() fails.
 */
static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
						  u8 **image_ptr)
{
	struct bpf_binary_header *header;
	unsigned int sz, hole;

	/* Most BPF filters are really small, but if some of them fill a page,
	 * allow at least 128 extra bytes for illegal instructions.
	 */
	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
	header = module_alloc(sz);
	if (!header)
		return NULL;
	/* Zero-fill: zero bytes act as illegal instructions, so all slack
	 * space around the program is trapping padding.
	 */
	memset(header, 0, sz);
	header->pages = sz / PAGE_SIZE;
	/* Size of the slack usable for the random offset, capped at one
	 * page minus the header.
	 */
	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
	/* Insert random number of illegal instructions before BPF code
	 * and make sure the first instruction starts at an even address.
	 */
	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
	return header;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header = NULL;
@@ -850,7 +822,8 @@ void bpf_jit_compile(struct bpf_prog *fp)
			size = prg_len + lit_len;
			if (size >= BPF_SIZE_MAX)
				goto out;
			header = bpf_alloc_binary(size, &jit.start);
			header = bpf_jit_binary_alloc(size, &jit.start,
						      2, bpf_jit_fill_hole);
			if (!header)
				goto out;
			jit.prg = jit.mid = jit.start + prg_len;
@@ -869,7 +842,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
	if (jit.start) {
		set_memory_ro((unsigned long)header, header->pages);
		fp->bpf_func = (void *) jit.start;
		fp->jited = 1;
		fp->jited = true;
	}
out:
	kfree(addrs);
@@ -884,7 +857,7 @@ void bpf_jit_free(struct bpf_prog *fp)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	module_free(NULL, header);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
+1 −1
Original line number Diff line number Diff line
@@ -801,7 +801,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
	if (image) {
		bpf_flush_icache(image, image + proglen);
		fp->bpf_func = (void *)image;
		fp->jited = 1;
		fp->jited = true;
	}
out:
	kfree(addrs);
Loading