Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e339756c authored by David S. Miller
Browse files

Merge branch 'filter-next'



Alexei Starovoitov says:

====================
net: filter: split sk_filter into socket and bpf, cleanup names

The main goal of the series is to split 'struct sk_filter' into socket and
bpf parts and cleanup names in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into
struct sk_filter {
	atomic_t        refcnt;
	struct rcu_head rcu;
	struct bpf_prog *prog;
};
and
struct bpf_prog {
        u32                     jited:1,
                                len:31;
        struct sock_fprog_kern  *orig_prog;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
                struct work_struct      work;
        };
};
so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases:
isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
which don't need refcnt/rcu fields.

It's a follow up to the rcu cleanup started by Pablo in
commit 34c5bd66 ("net: filter: don't release unattached filter through call_rcu()")

Patch 1 - cleans up socket memory charging and makes it possible for functions
  sk(bpf)_migrate_filter(), sk(bpf)_prepare_filter() to be socket independent
Patches 2-4 - trivial renames
Patch 5 - sk_filter split and renames of related sk_*() functions
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4330487a 7ae457c1
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -586,12 +586,12 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf
extension, PTP dissector/classifier, and much more. They are all internally
converted by the kernel into the new instruction set representation and run
in the eBPF interpreter. For in-kernel handlers, this all works transparently
by using sk_unattached_filter_create() for setting up the filter, resp.
sk_unattached_filter_destroy() for destroying it. The macro
SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
code to run the filter. 'filter' is a pointer to struct sk_filter that we
got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
skb pointer). All constraints and restrictions from sk_chk_filter() apply
by using bpf_prog_create() for setting up the filter, resp.
bpf_prog_destroy() for destroying it. The macro
BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
code to run the filter. 'filter' is a pointer to struct bpf_prog that we
got from bpf_prog_create(), and 'ctx' the given context (e.g.
skb pointer). All constraints and restrictions from bpf_check_classic() apply
before a conversion to the new layout is being done behind the scenes!

Currently, the classic BPF format is being used for JITing on most of the
+4 −4
Original line number Diff line number Diff line
@@ -56,7 +56,7 @@
#define FLAG_NEED_X_RESET	(1 << 0)

struct jit_ctx {
	const struct sk_filter *skf;
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx)
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct sk_filter *prog = ctx->skf;
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
@@ -857,7 +857,7 @@ static int build_body(struct jit_ctx *ctx)
}


void bpf_jit_compile(struct sk_filter *fp)
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned tmp_idx;
@@ -926,7 +926,7 @@ void bpf_jit_compile(struct sk_filter *fp)
	return;
}

void bpf_jit_free(struct sk_filter *fp)
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
+4 −4
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct sk_filter *skf;
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
@@ -789,7 +789,7 @@ static int pkt_type_offset(void)
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct sk_filter *prog = ctx->skf;
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, load_order, condt;
	u32 k, b_off __maybe_unused;
@@ -1369,7 +1369,7 @@ static int build_body(struct jit_ctx *ctx)

int bpf_jit_enable __read_mostly;

void bpf_jit_compile(struct sk_filter *fp)
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;
@@ -1423,7 +1423,7 @@ void bpf_jit_compile(struct sk_filter *fp)
	kfree(ctx.offsets);
}

void bpf_jit_free(struct sk_filter *fp)
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
+4 −4
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end)
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
@@ -569,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
	return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
@@ -693,7 +693,7 @@ void bpf_jit_compile(struct sk_filter *fp)
	return;
}

void bpf_jit_free(struct sk_filter *fp)
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
+2 −2
Original line number Diff line number Diff line
@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
	return header;
}

void bpf_jit_compile(struct sk_filter *fp)
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header = NULL;
	unsigned long size, prg_len, lit_len;
@@ -875,7 +875,7 @@ void bpf_jit_compile(struct sk_filter *fp)
	kfree(addrs);
}

void bpf_jit_free(struct sk_filter *fp)
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;
Loading