Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0157edc8 authored by Alexei Starovoitov
Browse files

Merge branch 'device-ops-as-cb'



Quentin Monnet says:

====================
For passing device functions for offloaded eBPF programs, there used to
be no place to store the pointer without making the non-offloaded
programs pay a memory price.

As a consequence, three functions were called with ndo_bpf() through
specific commands. Now that we have struct bpf_offload_dev, and since none
of those operations rely on RTNL, we can turn these three commands into
hooks inside the struct bpf_prog_offload_ops, and pass them as part of
bpf_offload_dev_create().

This patch set changes the offload architecture to do so, and brings the
relevant changes to the nfp and netdevsim drivers.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents c8123ead 16a8cb5c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
	}

	bpf->bpf_dev = bpf_offload_dev_create();
	bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
	if (err)
		goto err_free_neutral_maps;
+5 −1
Original line number Diff line number Diff line
@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
+14 −20
Original line number Diff line number Diff line
@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Map record paths are entered via ndo, update side is protected. */
	ASSERT_RTNL();

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
	bool freed = false;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
	kfree(nfp_prog);
}

static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
	struct nfp_app *app = nn->app;
	struct nfp_prog *nfp_prog;
	int ret;

@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

@@ -219,8 +212,9 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
	return ret;
}

static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
static int nfp_bpf_translate(struct bpf_prog *prog)
{
	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_instr;
	int err;
@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
static void nfp_bpf_destroy(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

/* Atomic engine requires values to be in big endian, we need to byte swap
@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
@@ -601,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,

	return 0;
}

/* Offload callbacks registered with the BPF core via
 * bpf_offload_dev_create(); these replace the former per-command
 * ndo_bpf() dispatch (BPF_OFFLOAD_VERIFIER_PREP/TRANSLATE/DESTROY).
 */
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
	.insn_hook	= nfp_verify_insn,
	.finalize	= nfp_bpf_finalize,
	.prepare	= nfp_bpf_verifier_prep,
	.translate	= nfp_bpf_translate,
	.destroy	= nfp_bpf_destroy,
};
+3 −8
Original line number Diff line number Diff line
@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	return 0;
}

static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
@@ -745,7 +745,7 @@ nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
	goto continue_subprog;
}

static int nfp_bpf_finalize(struct bpf_verifier_env *env)
int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *info;
	struct nfp_prog *nfp_prog;
@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook	= nfp_verify_insn,
	.finalize	= nfp_bpf_finalize,
};
+27 −24
Original line number Diff line number Diff line
@@ -91,11 +91,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env)
	return 0;
}

static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
	.insn_hook	= nsim_bpf_verify_insn,
	.finalize	= nsim_bpf_finalize,
};

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
	return ns->xdp_hw.prog;
@@ -263,6 +258,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
	return 0;
}

/* ->prepare offload hook: refuse the bind unless the simulated device
 * has been configured to accept programs, then allocate per-program
 * bound state.  The bpf_bind_accept check mirrors the old
 * BPF_OFFLOAD_VERIFIER_PREP command path it replaces.
 */
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);

	if (!ns->bpf_bind_accept)
		return -EOPNOTSUPP;

	return nsim_bpf_create_prog(ns, prog);
}

/* ->translate offload hook: netdevsim performs no real JIT; it only
 * flips the bound program's state string to "xlated".  Never fails.
 */
static int nsim_bpf_translate(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;

	state->state = "xlated";
	return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;
@@ -275,6 +288,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
	kfree(state);
}

/* Offload callbacks handed to the BPF core via bpf_offload_dev_create();
 * supersedes the removed per-command handling in nsim_bpf().
 */
static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
	.insn_hook	= nsim_bpf_verify_insn,
	.finalize	= nsim_bpf_finalize,
	.prepare	= nsim_bpf_verifier_prep,
	.translate	= nsim_bpf_translate,
	.destroy	= nsim_bpf_destroy_prog,
};

static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (bpf->prog && bpf->prog->aux->offload) {
@@ -539,24 +560,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	ASSERT_RTNL();

	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		if (!ns->bpf_bind_accept)
			return -EOPNOTSUPP;

		err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
		if (err)
			return err;

		bpf->verifier.ops = &nsim_bpf_analyzer_ops;
		return 0;
	case BPF_OFFLOAD_TRANSLATE:
		state = bpf->offload.prog->aux->offload->dev_priv;

		state->state = "xlated";
		return 0;
	case BPF_OFFLOAD_DESTROY:
		nsim_bpf_destroy_prog(bpf->offload.prog);
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&ns->xdp, bpf);
	case XDP_QUERY_PROG_HW:
@@ -599,7 +602,7 @@ int nsim_bpf_init(struct netdevsim *ns)
		if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
			return -ENOMEM;

		ns->sdev->bpf_dev = bpf_offload_dev_create();
		ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
		err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
		if (err)
			return err;
Loading