Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit df718423 authored by David S. Miller
Browse files

Merge branch 'bpf_random32'



Daniel Borkmann says:

====================
BPF/random32 updates

BPF update to split the prandom state apart, and to move the
*once helpers to the core. For details, please see individual
patches. Given the changes and since it's in the tree for
quite some time, net-next is a better choice in our opinion.

v1 -> v2:
 - Make DO_ONCE() type-safe, remove the kvec helper. Credits
   go to Alexei Starovoitov for the __VA_ARGS__ hint, thanks!
 - Add a comment to the DO_ONCE() helper as suggested by Alexei.
 - Rework prandom_init_once() helper to the new API.
 - Keep Alexei's Acked-by on the last patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 28335a74 3ad00405
Loading
Loading
Loading
Loading
+4 −0
Original line number Original line Diff line number Diff line
@@ -200,4 +200,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;


/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#endif /* _LINUX_BPF_H */
#endif /* _LINUX_BPF_H */
+4 −17
Original line number Original line Diff line number Diff line
@@ -24,7 +24,8 @@
#include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate.h>
#include <linux/jump_label.h>
#include <linux/once.h>

#include <uapi/linux/net.h>
#include <uapi/linux/net.h>


struct poll_table_struct;
struct poll_table_struct;
@@ -250,22 +251,8 @@ do { \
	} while (0)
	} while (0)
#endif
#endif


bool __net_get_random_once(void *buf, int nbytes, bool *done,
			   struct static_key *done_key);

#define net_get_random_once(buf, nbytes)			\
#define net_get_random_once(buf, nbytes)			\
	({								\
	get_random_once((buf), (nbytes))
		bool ___ret = false;					\
		static bool ___done = false;				\
		static struct static_key ___once_key =			\
			STATIC_KEY_INIT_TRUE;				\
		if (static_key_true(&___once_key))			\
			___ret = __net_get_random_once(buf,		\
						       nbytes,		\
						       &___done,	\
						       &___once_key);	\
		___ret;							\
	})


int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
		   size_t num, size_t len);
		   size_t num, size_t len);

include/linux/once.h

0 → 100644
+57 −0
Original line number Original line Diff line number Diff line
#ifndef _LINUX_ONCE_H
#define _LINUX_ONCE_H

#include <linux/types.h>
#include <linux/jump_label.h>

/* Internal helpers backing DO_ONCE(); not for direct use. The return
 * value of __do_once_start() gates the one-time call (see macro below);
 * __do_once_done() then patches out the static key. The flags argument
 * presumably carries lock/IRQ state between start and done — confirm
 * against the lib/once.c implementation.
 */
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags);

/* Call a function exactly once. The idea of DO_ONCE() is to perform
 * a function call such as initialization of random seeds, etc, only
 * once, where DO_ONCE() can live in the fast-path. After @func has
 * been called with the passed arguments, the static key will patch
 * out the condition into a nop. DO_ONCE() guarantees type safety of
 * arguments!
 *
 * Note that the following is not equivalent ...
 *
 *   DO_ONCE(func, arg);
 *   DO_ONCE(func, arg);
 *
 * ... to this version:
 *
 *   void foo(void)
 *   {
 *     DO_ONCE(func, arg);
 *   }
 *
 *   foo();
 *   foo();
 *
 * In case the one-time invocation could be triggered from multiple
 * places, then a common helper function must be defined, so that only
 * a single static key will be placed there!
 */
#define DO_ONCE(func, ...)						     \
	({								     \
		bool ___ret = false;					     \
		static bool ___done = false;				     \
		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
		if (static_key_true(&___once_key)) {			     \
			unsigned long ___flags;				     \
			___ret = __do_once_start(&___done, &___flags);	     \
			if (unlikely(___ret)) {				     \
				func(__VA_ARGS__);			     \
				__do_once_done(&___done, &___once_key,	     \
					       &___flags);		     \
			}						     \
		}							     \
		___ret;							     \
	})

/* One-time buffer initialization from get_random_bytes(); evaluates to
 * true only for the invocation that actually filled the buffer.
 */
#define get_random_once(buf, nbytes)					     \
	DO_ONCE(get_random_bytes, (buf), (nbytes))

#endif /* _LINUX_ONCE_H */
+6 −0
Original line number Original line Diff line number Diff line
@@ -7,6 +7,8 @@
#define _LINUX_RANDOM_H
#define _LINUX_RANDOM_H


#include <linux/list.h>
#include <linux/list.h>
#include <linux/once.h>

#include <uapi/linux/random.h>
#include <uapi/linux/random.h>


struct random_ready_callback {
struct random_ready_callback {
@@ -45,6 +47,10 @@ struct rnd_state {


u32 prandom_u32_state(struct rnd_state *state);
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);

#define prandom_init_once(pcpu_state)			\
	DO_ONCE(prandom_seed_full_state, (pcpu_state))


/**
/**
 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+26 −0
Original line number Original line Diff line number Diff line
@@ -731,6 +731,32 @@ void bpf_prog_free(struct bpf_prog *fp)
}
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
EXPORT_SYMBOL_GPL(bpf_prog_free);


/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

/* Seed the per-cpu BPF user RNG state; prandom_init_once() (a DO_ONCE()
 * wrapper) guarantees the seeding runs at most once system-wide.
 */
void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

/* BPF helper returning a pseudo-random u32 drawn from the separated
 * per-cpu bpf_user_rnd_state (seeded via bpf_user_rnd_init_once()).
 */
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	/* put_cpu_var() must name the per-cpu variable itself — the
	 * original passed the pointer 'state' from get_cpu_var(), which
	 * only compiles by accident of the macro's (void)&(var) check.
	 */
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
Loading