
Commit 5620e1a8 authored by Daniel Borkmann

Merge branch 'bpf-offload-report-dev'



Jakub Kicinski says:

====================
This series is a redo of reporting offload device information to
user space after the first attempt did not take namespaces into
account.  As requested by Kirill, offloads are now protected by an
r/w sem.  This allows us to remove the workqueue and free the
offload state fully when the device is removed (suggested by Alexei).

The net namespace is reported as a device/inode pair.

The accompanying bpftool support is placed in common code because
maps will have very similar info.  Note that the UAPI information
can't be nicely encapsulated into a struct: if we ever need to grow
the device information, the new fields will have to be added at the
end of struct bpf_prog_info, since we can't grow structures in the
middle of bpf_prog_info.

v3:
 - use dev_get_by_index();
 - redo ns code (new patch 6).
v2:
 - rework the locking in patch 1 (use RCU instead of locking
   dependencies);
 - grab RTNL for a short time in patch 6;
 - minor update to the test in patch 8.
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents fb982666 752d7b45
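
The offload state the cover letter refers to lives in kernel/bpf/offload.c, which is not among the hunks captured below. A minimal sketch of the locking scheme described above, using illustrative function names rather than the series' actual code:

/*
 * Sketch only: a single r/w semaphore protects the offload state, so
 * readers (e.g. filling bpf_prog_info) can run concurrently, while
 * program teardown and netdev removal take the write side and free
 * the state outright instead of deferring work to a workqueue.
 */
#include <linux/bpf.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

static DECLARE_RWSEM(bpf_devs_lock);

static void bpf_offload_state_destroy_sketch(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload;

	down_write(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		list_del_init(&offload->offloads);
		kfree(offload);
		prog->aux->offload = NULL;
	}
	up_write(&bpf_devs_lock);
}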
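Because user space hands the kernel a sized bpf_prog_info buffer, the UAPI can only grow by appending fields at the tail, which is why the device information is not wrapped in a nested struct. A rough sketch of the appended fields (names as introduced by this series; see include/uapi/linux/bpf.h for the authoritative layout):

/* Illustrative excerpt of the tail of struct bpf_prog_info after this
 * series; earlier fields are omitted. Requires <linux/types.h>.
 */
struct bpf_prog_info {
	/* ... existing fields (type, id, tag, jited/xlated insns, ...) ... */
	__u32 ifindex;		/* netdev the program is offloaded to */
	__u64 netns_dev;	/* dev of that netdev's net namespace */
	__u64 netns_ino;	/* inode of that netdev's net namespace */
} __attribute__((aligned(8)));

User space such as bpftool can compare the reported netns_dev/netns_ino pair against a stat() of /proc/self/ns/net to decide whether the offload device lives in the caller's own network namespace.
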
+1 −1
@@ -238,7 +238,7 @@ struct nfp_bpf_vnic {
 
 int nfp_bpf_jit(struct nfp_prog *prog);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
+1 −1
@@ -260,6 +260,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 	return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
 	.insn_hook = nfp_verify_insn,
 };
+1 −1
@@ -66,7 +66,7 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
 	return 0;
 }
 
-static const struct bpf_ext_analyzer_ops nsim_bpf_analyzer_ops = {
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
 	.insn_hook = nsim_bpf_verify_insn,
 };
 
+26 −3
@@ -103,14 +103,14 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
 	goto got_it;
 }
 
-void *ns_get_path(struct path *path, struct task_struct *task,
-			const struct proc_ns_operations *ns_ops)
+void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+		     void *private_data)
 {
 	struct ns_common *ns;
 	void *ret;
 
 again:
-	ns = ns_ops->get(task);
+	ns = ns_get_cb(private_data);
 	if (!ns)
 		return ERR_PTR(-ENOENT);
 
@@ -120,6 +120,29 @@ void *ns_get_path(struct path *path, struct task_struct *task,
 	return ret;
 }
 
+struct ns_get_path_task_args {
+	const struct proc_ns_operations *ns_ops;
+	struct task_struct *task;
+};
+
+static struct ns_common *ns_get_path_task(void *private_data)
+{
+	struct ns_get_path_task_args *args = private_data;
+
+	return args->ns_ops->get(args->task);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+		  const struct proc_ns_operations *ns_ops)
+{
+	struct ns_get_path_task_args args = {
+		.ns_ops	= ns_ops,
+		.task	= task,
+	};
+
+	return ns_get_path_cb(path, ns_get_path_task, &args);
+}
+
 int open_related_ns(struct ns_common *ns,
 		   struct ns_common *(*get_ns)(struct ns_common *ns))
 {
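
The new ns_get_path_cb() above decouples nsfs path construction from a task_struct, so a subsystem can report a namespace it already holds. A hypothetical caller might look like the sketch below (names invented for illustration; in this series the actual user is the BPF offload info-fill code, which reports the bound netdev's net namespace as the device/inode pair mentioned in the cover letter):

/* Hypothetical example: turn a struct net we already have into the
 * device/inode pair user space sees for /proc/<pid>/ns/net.
 */
#include <linux/kdev_t.h>
#include <linux/proc_ns.h>
#include <net/net_namespace.h>

struct fill_netns_args {
	struct net *net;
};

static struct ns_common *fill_netns_cb(void *private_data)
{
	struct fill_netns_args *args = private_data;

	get_net(args->net);	/* reference is consumed by the nsfs code */
	return &args->net->ns;
}

static int fill_netns_ids(struct net *net, u64 *dev, u64 *ino)
{
	struct fill_netns_args args = { .net = net };
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, fill_netns_cb, &args);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ns_inode = ns_path.dentry->d_inode;
	*dev = new_encode_dev(ns_inode->i_sb->s_dev);
	*ino = ns_inode->i_ino;
	path_put(&ns_path);
	return 0;
}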
+13 −3
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -184,14 +185,18 @@ struct bpf_verifier_ops {
 				  struct bpf_prog *prog, u32 *target_size);
 };
 
+struct bpf_prog_offload_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
 struct bpf_dev_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
 	void			*dev_priv;
 	struct list_head	offloads;
 	bool			dev_state;
-	bool			verifier_running;
-	wait_queue_head_t	verifier_done;
+	const struct bpf_prog_offload_ops *dev_ops;
 };
 
 struct bpf_prog_aux {
@@ -201,6 +206,7 @@ struct bpf_prog_aux {
 	u32 stack_depth;
 	u32 id;
 	u32 func_cnt;
+	bool offload_requested;
 	struct bpf_prog **func;
 	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
@@ -351,6 +357,8 @@ void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -523,13 +531,15 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+			       struct bpf_prog *prog);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
-	return aux->offload;
+	return aux->offload_requested;
 }
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,