
Commit 7ae457c1 authored by Alexei Starovoitov, committed by David S. Miller

net: filter: split 'struct sk_filter' into socket and bpf parts



clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into
struct sk_filter {
	atomic_t        refcnt;
	struct rcu_head rcu;
	struct bpf_prog *prog;
};
and
struct bpf_prog {
        u32                     jited:1,
                                len:31;
        struct sock_fprog_kern  *orig_prog;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
                struct work_struct      work;
        };
};
so that 'struct bpf_prog' can be used independently of sockets, which cleans up
the 'unattached' bpf use cases

split SK_RUN_FILTER macro into:
    SK_RUN_FILTER to be used with 'struct sk_filter *' and
    BPF_PROG_RUN to be used with 'struct bpf_prog *'
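
illustrative sketch, not necessarily the verbatim macro bodies: given the
'prog' member of 'struct sk_filter' and the 'bpf_func'/'insnsi' fields of
'struct bpf_prog' shown above, the two run variants relate roughly as

#define BPF_PROG_RUN(prog, ctx)		(*(prog)->bpf_func)(ctx, (prog)->insnsi)
#define SK_RUN_FILTER(filter, ctx)	BPF_PROG_RUN((filter)->prog, ctx)

i.e. the socket-level macro just dereferences the embedded bpf program and runs it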

__sk_filter_release(struct sk_filter *) gains a
__bpf_prog_release(struct bpf_prog *) helper function
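
roughly (sketch only, assuming the helper just releases the inner program
before the socket wrapper is freed; see net/core/filter.c for the real code):

static void __bpf_prog_release(struct bpf_prog *prog)
{
	bpf_release_orig_filter(prog);
	bpf_prog_free(prog);
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}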

also perform related renames for the functions that work
with 'struct bpf_prog *', since they're along the same lines:

sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet
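
for example, the socket receive path runs an attached filter roughly like this
(sketch, error handling and stats omitted; 'sk' and 'skb' are the usual socket
and packet pointers):

struct sk_filter *filter;
unsigned int pkt_len;

rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter)
	pkt_len = SK_RUN_FILTER(filter, skb);	/* 0 means drop the packet */
rcu_read_unlock();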

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
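
a rough usage sketch for the unattached case (hypothetical example program;
'skb' stands for whatever context the caller filters on):

struct sock_filter insns[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),	/* A = skb->len */
	BPF_STMT(BPF_RET | BPF_A, 0),		/* return A */
};
struct sock_fprog_kern fprog = {
	.len	= ARRAY_SIZE(insns),
	.filter	= insns,
};
struct bpf_prog *prog;

if (bpf_prog_create(&prog, &fprog) == 0) {
	u32 res = BPF_PROG_RUN(prog, skb);	/* run on the given context */
	bpf_prog_destroy(prog);
}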

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8fb575ca
+5 −5
@@ -586,11 +586,11 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf
 extension, PTP dissector/classifier, and much more. They are all internally
 converted by the kernel into the new instruction set representation and run
 in the eBPF interpreter. For in-kernel handlers, this all works transparently
-by using sk_unattached_filter_create() for setting up the filter, resp.
-sk_unattached_filter_destroy() for destroying it. The macro
-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
-code to run the filter. 'filter' is a pointer to struct sk_filter that we
-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
+by using bpf_prog_create() for setting up the filter, resp.
+bpf_prog_destroy() for destroying it. The macro
+BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
+code to run the filter. 'filter' is a pointer to struct bpf_prog that we
+got from bpf_prog_create(), and 'ctx' the given context (e.g.
 skb pointer). All constraints and restrictions from bpf_check_classic() apply
 before a conversion to the new layout is being done behind the scenes!


+4 −4
@@ -56,7 +56,7 @@
 #define FLAG_NEED_X_RESET	(1 << 0)
 
 struct jit_ctx {
-	const struct sk_filter *skf;
+	const struct bpf_prog *skf;
 	unsigned idx;
 	unsigned prologue_bytes;
 	int ret0_fp_idx;
@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx)
 static int build_body(struct jit_ctx *ctx)
 {
 	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-	const struct sk_filter *prog = ctx->skf;
+	const struct bpf_prog *prog = ctx->skf;
 	const struct sock_filter *inst;
 	unsigned i, load_order, off, condt;
 	int imm12;
@@ -857,7 +857,7 @@ b_epilogue:
 }
 
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct jit_ctx ctx;
 	unsigned tmp_idx;
@@ -926,7 +926,7 @@ out:
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);

+4 −4
@@ -131,7 +131,7 @@
  * @target:		Memory location for the compiled filter
  */
 struct jit_ctx {
-	const struct sk_filter *skf;
+	const struct bpf_prog *skf;
 	unsigned int prologue_bytes;
 	u32 idx;
 	u32 flags;
@@ -789,7 +789,7 @@ static int pkt_type_offset(void)
 static int build_body(struct jit_ctx *ctx)
 {
 	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-	const struct sk_filter *prog = ctx->skf;
+	const struct bpf_prog *prog = ctx->skf;
 	const struct sock_filter *inst;
 	unsigned int i, off, load_order, condt;
 	u32 k, b_off __maybe_unused;
@@ -1369,7 +1369,7 @@ jmp_cmp:
 
 int bpf_jit_enable __read_mostly;
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct jit_ctx ctx;
 	unsigned int alloc_size, tmp_idx;
@@ -1423,7 +1423,7 @@ out:
 	kfree(ctx.offsets);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);

+4 −4
@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 				   struct codegen_context *ctx)
 {
 	int i;
@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			      struct codegen_context *ctx,
 			      unsigned int *addrs)
 {
@@ -569,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 	return 0;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	unsigned int proglen;
 	unsigned int alloclen;
@@ -693,7 +693,7 @@ out:
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);

+2 −2
@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
 	return header;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct bpf_binary_header *header = NULL;
 	unsigned long size, prg_len, lit_len;
@@ -875,7 +875,7 @@ out:
 	kfree(addrs);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
 	struct bpf_binary_header *header = (void *)addr;