
Commit f4f142ed authored by Linus Torvalds

Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random
Pull randomness updates from Ted Ts'o:
 "Cleanups and bug fixes to /dev/random, add a new getrandom(2) system
  call, which is a superset of OpenBSD's getentropy(2) call, for use
  with userspace crypto libraries such as LibreSSL.

  Also add the ability to have a kernel thread to pull entropy from
  hardware rng devices into /dev/random"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  hwrng: Pass entropy to add_hwgenerator_randomness() in bits, not bytes
  random: limit the contribution of the hw rng to at most half
  random: introduce getrandom(2) system call
  hw_random: fix sparse warning (NULL vs 0 for pointer)
  random: use registers from interrupted code for CPU's w/o a cycle counter
  hwrng: add per-device entropy derating
  hwrng: create filler thread
  random: add_hwgenerator_randomness() for feeding entropy from devices
  random: use an improved fast_mix() function
  random: clean up interrupt entropy accounting for archs w/o cycle counters
  random: only update the last_pulled time if we actually transferred entropy
  random: remove unneeded hash of a portion of the entropy pool
  random: always update the entropy pool under the spinlock
parents bb2cbf5e e02b8765
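
A note on usage: C libraries had no wrapper for the new system call at the time, so userspace reaches it through syscall(2). The sketch below is an illustration, not part of this commit; it assumes kernel headers that define __NR_getrandom (the numbers assigned in the table changes below are 355 on i386 and 318 on x86-64) and shows how OpenBSD-style getentropy(2) semantics can be emulated on top of getrandom(2):

/* Illustrative only: emulating OpenBSD's getentropy(2) with getrandom(2).
 * Assumes installed kernel headers that define __NR_getrandom. */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

static int my_getentropy(void *buf, size_t buflen)
{
	/* getentropy(2) rejects requests larger than 256 bytes */
	if (buflen > 256) {
		errno = EIO;
		return -1;
	}
	/* flags == 0: read the urandom pool, blocking only until the
	 * nonblocking pool has been initialized at boot */
	if (syscall(__NR_getrandom, buf, buflen, 0) < 0)
		return -1;	/* errno set by the syscall */
	return 0;
}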
arch/x86/syscalls/syscall_32.tbl  +1 −0
@@ -361,3 +361,4 @@
 352	i386	sched_getattr		sys_sched_getattr
 353	i386	renameat2		sys_renameat2
 354	i386	seccomp			sys_seccomp
+355	i386	getrandom		sys_getrandom
arch/x86/syscalls/syscall_64.tbl  +1 −0
@@ -324,6 +324,7 @@
 315	common	sched_getattr		sys_sched_getattr
 316	common	renameat2		sys_renameat2
 317	common	seccomp			sys_seccomp
+318	common	getrandom		sys_getrandom
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
drivers/char/hw_random/core.c  +65 −2
@@ -38,6 +38,7 @@
 #include <linux/fs.h>
 #include <linux/sched.h>
 #include <linux/miscdevice.h>
+#include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/random.h>
@@ -50,10 +51,22 @@
 
 
 static struct hwrng *current_rng;
+static struct task_struct *hwrng_fill;
 static LIST_HEAD(rng_list);
 static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
-static u8 *rng_buffer;
+static u8 *rng_buffer, *rng_fillbuf;
+static unsigned short current_quality;
+static unsigned short default_quality; /* = 0; default to "off" */
+
+module_param(current_quality, ushort, 0644);
+MODULE_PARM_DESC(current_quality,
+		 "current hwrng entropy estimation per mill");
+module_param(default_quality, ushort, 0644);
+MODULE_PARM_DESC(default_quality,
+		 "default entropy content of hwrng per mill");
+
+static void start_khwrngd(void);
 
 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
 			       int wait);
@@ -89,6 +102,15 @@ static inline int hwrng_init(struct hwrng *rng)
 			return ret;
 	}
 	add_early_randomness(rng);
+
+	current_quality = rng->quality ? : default_quality;
+	current_quality &= 1023;
+
+	if (current_quality == 0 && hwrng_fill)
+		kthread_stop(hwrng_fill);
+	if (current_quality > 0 && !hwrng_fill)
+		start_khwrngd();
+
 	return 0;
 }
 
@@ -325,6 +347,36 @@ err_misc_dereg:
 	goto out;
 }
 
+static int hwrng_fillfn(void *unused)
+{
+	long rc;
+
+	while (!kthread_should_stop()) {
+		if (!current_rng)
+			break;
+		rc = rng_get_data(current_rng, rng_fillbuf,
+				  rng_buffer_size(), 1);
+		if (rc <= 0) {
+			pr_warn("hwrng: no data available\n");
+			msleep_interruptible(10000);
+			continue;
+		}
+		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
+					   rc * current_quality * 8 >> 10);
+	}
+	hwrng_fill = NULL;
+	return 0;
+}
+
+static void start_khwrngd(void)
+{
+	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+		pr_err("hwrng_fill thread creation failed");
+		hwrng_fill = NULL;
+	}
+}
+
 int hwrng_register(struct hwrng *rng)
 {
 	int err = -EINVAL;
@@ -343,6 +395,13 @@ int hwrng_register(struct hwrng *rng)
 		if (!rng_buffer)
 			goto out_unlock;
 	}
+	if (!rng_fillbuf) {
+		rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+		if (!rng_fillbuf) {
+			kfree(rng_buffer);
+			goto out_unlock;
+		}
+	}
 
 	/* Must not register two RNGs with the same name. */
 	err = -EEXIST;
@@ -406,8 +465,11 @@ void hwrng_unregister(struct hwrng *rng)
 				current_rng = NULL;
 		}
 	}
-	if (list_empty(&rng_list))
+	if (list_empty(&rng_list)) {
 		unregister_miscdev();
+		if (hwrng_fill)
+			kthread_stop(hwrng_fill);
+	}
 
 	mutex_unlock(&rng_mutex);
 }
@@ -418,6 +480,7 @@ static void __exit hwrng_exit(void)
 	mutex_lock(&rng_mutex);
 	BUG_ON(current_rng);
 	kfree(rng_buffer);
+	kfree(rng_fillbuf);
 	mutex_unlock(&rng_mutex);
 }
 
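The derating arithmetic in hwrng_fillfn() above is worth spelling out: quality is expressed per mill, and rc * current_quality * 8 >> 10 converts rc bytes read into credited bits, using a right shift by 10 (divide by 1024) as a cheap approximation of dividing by 1000. A standalone restatement with illustrative values, not from the commit:

/* Illustrative userspace restatement of the filler thread's entropy
 * credit calculation; the values here are examples only. */
#include <stdio.h>

int main(void)
{
	unsigned short current_quality = 700; /* device claims ~70% entropy */
	long rc = 32;                         /* bytes returned by the RNG */

	/* 32 bytes = 256 raw bits; credit 32 * 700 * 8 / 1024 = 175 bits */
	long credited_bits = rc * current_quality * 8 >> 10;

	printf("read %ld bytes, crediting %ld bits of entropy\n",
	       rc, credited_bits);
	return 0;
}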
drivers/char/random.c  +187 −128
@@ -250,6 +250,7 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
+#include <linux/kthread.h>
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
@@ -257,6 +258,8 @@
 #include <linux/kmemcheck.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
+#include <linux/syscalls.h>
+#include <linux/completion.h>
 
 #include <asm/processor.h>
 #include <asm/uaccess.h>
@@ -267,6 +270,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/random.h>
 
+/* #define ADD_INTERRUPT_BENCH */
+
 /*
  * Configuration information
  */
@@ -401,6 +406,7 @@ static struct poolinfo {
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
 /**********************************************************************
@@ -481,9 +487,9 @@ static __u32 const twist_table[8] = {
  * the entropy is concentrated in the low-order bits.
  */
 static void _mix_pool_bytes(struct entropy_store *r, const void *in,
-			    int nbytes, __u8 out[64])
+			    int nbytes)
 {
-	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+	unsigned long i, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	const char *bytes = in;
@@ -495,9 +501,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
 
-	smp_rmb();
-	input_rotate = ACCESS_ONCE(r->input_rotate);
-	i = ACCESS_ONCE(r->add_ptr);
+	input_rotate = r->input_rotate;
+	i = r->add_ptr;
 
 	/* mix one byte at a time to simplify size handling and churn faster */
 	while (nbytes--) {
@@ -524,39 +529,33 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
 	}
 
-	ACCESS_ONCE(r->input_rotate) = input_rotate;
-	ACCESS_ONCE(r->add_ptr) = i;
-	smp_wmb();
-
-	if (out)
-		for (j = 0; j < 16; j++)
-			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+	r->input_rotate = input_rotate;
+	r->add_ptr = i;
 }
 
 static void __mix_pool_bytes(struct entropy_store *r, const void *in,
-			     int nbytes, __u8 out[64])
+			     int nbytes)
 {
 	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
-	_mix_pool_bytes(r, in, nbytes, out);
+	_mix_pool_bytes(r, in, nbytes);
 }
 
 static void mix_pool_bytes(struct entropy_store *r, const void *in,
-			   int nbytes, __u8 out[64])
+			   int nbytes)
 {
 	unsigned long flags;
 
 	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
 	spin_lock_irqsave(&r->lock, flags);
-	_mix_pool_bytes(r, in, nbytes, out);
+	_mix_pool_bytes(r, in, nbytes);
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 struct fast_pool {
 	__u32		pool[4];
 	unsigned long	last;
-	unsigned short	count;
-	unsigned char	rotate;
-	unsigned char	last_timer_intr;
+	unsigned short	reg_idx;
+	unsigned char	count;
 };
 
 /*
@@ -564,25 +563,29 @@ struct fast_pool {
  * collector.  It's hardcoded for an 128 bit pool and assumes that any
  * locks that might be needed are taken by the caller.
  */
-static void fast_mix(struct fast_pool *f, __u32 input[4])
+static void fast_mix(struct fast_pool *f)
 {
-	__u32		w;
-	unsigned	input_rotate = f->rotate;
+	__u32 a = f->pool[0],	b = f->pool[1];
+	__u32 c = f->pool[2],	d = f->pool[3];
 
-	w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
-	f->pool[0] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 14) & 31;
-	w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
-	f->pool[1] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 7) & 31;
-	w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
-	f->pool[2] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 7) & 31;
-	w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
-	f->pool[3] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 7) & 31;
+	a += b;			c += d;
+	b = rol32(a, 6);	d = rol32(c, 27);
+	d ^= a;			b ^= c;
 
-	f->rotate = input_rotate;
+	a += b;			c += d;
+	b = rol32(a, 16);	d = rol32(c, 14);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(a, 6);	d = rol32(c, 27);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(a, 16);	d = rol32(c, 14);
+	d ^= a;			b ^= c;
+
+	f->pool[0] = a;  f->pool[1] = b;
+	f->pool[2] = c;  f->pool[3] = d;
 	f->count++;
 }
@@ -657,6 +660,7 @@ retry:
 		r->entropy_total = 0;
 		if (r == &nonblocking_pool) {
 			prandom_reseed_late();
+			wake_up_interruptible(&urandom_init_wait);
 			pr_notice("random: %s pool is initialized\n", r->name);
 		}
 	}
@@ -739,13 +743,13 @@ void add_device_randomness(const void *buf, unsigned int size)
 
 	trace_add_device_randomness(size, _RET_IP_);
 	spin_lock_irqsave(&input_pool.lock, flags);
-	_mix_pool_bytes(&input_pool, buf, size, NULL);
-	_mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+	_mix_pool_bytes(&input_pool, buf, size);
+	_mix_pool_bytes(&input_pool, &time, sizeof(time));
 	spin_unlock_irqrestore(&input_pool.lock, flags);
 
 	spin_lock_irqsave(&nonblocking_pool.lock, flags);
-	_mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+	_mix_pool_bytes(&nonblocking_pool, buf, size);
+	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time));
 	spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
 }
 EXPORT_SYMBOL(add_device_randomness);
@@ -778,7 +782,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	sample.cycles = random_get_entropy();
 	sample.num = num;
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	mix_pool_bytes(r, &sample, sizeof(sample), NULL);
+	mix_pool_bytes(r, &sample, sizeof(sample));
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -835,6 +839,38 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
 
 static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
 
+#ifdef ADD_INTERRUPT_BENCH
+static unsigned long avg_cycles, avg_deviation;
+
+#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
+#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+
+static void add_interrupt_bench(cycles_t start)
+{
+	long delta = random_get_entropy() - start;
+
+	/* Use a weighted moving average */
+	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
+	avg_cycles += delta;
+	/* And average deviation */
+	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
+	avg_deviation += delta;
+}
+#else
+#define add_interrupt_bench(x)
+#endif
+
+static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+{
+	__u32 *ptr = (__u32 *) regs;
+
+	if (regs == NULL)
+		return 0;
+	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		f->reg_idx = 0;
+	return *(ptr + f->reg_idx++);
+}
+
 void add_interrupt_randomness(int irq, int irq_flags)
 {
 	struct entropy_store	*r;
@@ -842,55 +878,52 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	struct pt_regs		*regs = get_irq_regs();
 	unsigned long		now = jiffies;
 	cycles_t		cycles = random_get_entropy();
-	__u32			input[4], c_high, j_high;
+	__u32			c_high, j_high;
 	__u64			ip;
 	unsigned long		seed;
-	int			credit;
+	int			credit = 0;
 
+	if (cycles == 0)
+		cycles = get_reg(fast_pool, regs);
 	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
 	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
-	input[0] = cycles ^ j_high ^ irq;
-	input[1] = now ^ c_high;
+	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+	fast_pool->pool[1] ^= now ^ c_high;
 	ip = regs ? instruction_pointer(regs) : _RET_IP_;
-	input[2] = ip;
-	input[3] = ip >> 32;
+	fast_pool->pool[2] ^= ip;
+	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+		get_reg(fast_pool, regs);
 
-	fast_mix(fast_pool, input);
+	fast_mix(fast_pool);
+	add_interrupt_bench(cycles);
 
-	if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
+	if ((fast_pool->count < 64) &&
+	    !time_after(now, fast_pool->last + HZ))
 		return;
 
-	fast_pool->last = now;
-
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+	if (!spin_trylock(&r->lock))
+		return;
 
-	/*
-	 * If we don't have a valid cycle counter, and we see
-	 * back-to-back timer interrupts, then skip giving credit for
-	 * any entropy, otherwise credit 1 bit.
-	 */
-	credit = 1;
-	if (cycles == 0) {
-		if (irq_flags & __IRQF_TIMER) {
-			if (fast_pool->last_timer_intr)
-				credit = 0;
-			fast_pool->last_timer_intr = 1;
-		} else
-			fast_pool->last_timer_intr = 0;
-	}
+	fast_pool->last = now;
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
 
 	/*
 	 * If we have architectural seed generator, produce a seed and
-	 * add it to the pool.  For the sake of paranoia count it as
-	 * 50% entropic.
+	 * add it to the pool.  For the sake of paranoia don't let the
+	 * architectural seed generator dominate the input from the
+	 * interrupt noise.
 	 */
 	if (arch_get_random_seed_long(&seed)) {
-		__mix_pool_bytes(r, &seed, sizeof(seed), NULL);
-		credit += sizeof(seed) * 4;
+		__mix_pool_bytes(r, &seed, sizeof(seed));
+		credit = 1;
 	}
+	spin_unlock(&r->lock);
 
-	credit_entropy_bits(r, credit);
+	fast_pool->count = 0;
+
+	/* award one bit for the contents of the fast pool */
+	credit_entropy_bits(r, credit + 1);
 }
 
 #ifdef CONFIG_BLOCK
@@ -922,6 +955,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
+	if (!r->pull ||
+	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
+	    r->entropy_count > r->poolinfo->poolfracbits)
+		return;
+
 	if (r->limit == 0 && random_min_urandom_seed) {
 		unsigned long now = jiffies;
 
@@ -930,9 +968,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 			return;
 		r->last_pulled = now;
 	}
-	if (r->pull &&
-	    r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
-	    r->entropy_count < r->poolinfo->poolfracbits)
-		_xfer_secondary_pool(r, nbytes);
+
+	_xfer_secondary_pool(r, nbytes);
 }
 
@@ -953,7 +989,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
 	bytes = extract_entropy(r->pull, tmp, bytes,
 				random_read_wakeup_bits / 8, rsvd_bytes);
-	mix_pool_bytes(r, tmp, bytes, NULL);
+	mix_pool_bytes(r, tmp, bytes);
 	credit_entropy_bits(r, bytes*8);
 }
 
@@ -1039,7 +1075,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 		unsigned long l[LONGS(20)];
 	} hash;
 	__u32 workspace[SHA_WORKSPACE_WORDS];
-	__u8 extract[64];
 	unsigned long flags;
 
 	/*
@@ -1068,15 +1103,9 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
 	spin_unlock_irqrestore(&r->lock, flags);
 
-	/*
-	 * To avoid duplicates, we atomically extract a portion of the
-	 * pool while mixing, and hash one final time.
-	 */
-	sha_transform(hash.w, extract, workspace);
-	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
 	/*
@@ -1160,13 +1189,14 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
+	int large_request = (nbytes > 256);
 
 	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, 0, 0);
 
 	while (nbytes) {
-		if (need_resched()) {
+		if (large_request && need_resched()) {
 			if (signal_pending(current)) {
 				if (ret == 0)
 					ret = -ERESTARTSYS;
@@ -1263,14 +1293,14 @@ static void init_std_data(struct entropy_store *r)
 	unsigned long rv;
 
 	r->last_pulled = jiffies;
-	mix_pool_bytes(r, &now, sizeof(now), NULL);
+	mix_pool_bytes(r, &now, sizeof(now));
 	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
 			rv = random_get_entropy();
-		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+		mix_pool_bytes(r, &rv, sizeof(rv));
 	}
-	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
 /*
@@ -1309,39 +1339,8 @@ void rand_initialize_disk(struct gendisk *disk)
 }
 #endif
 
-/*
- * Attempt an emergency refill using arch_get_random_seed_long().
- *
- * As with add_interrupt_randomness() be paranoid and only
- * credit the output as 50% entropic.
- */
-static int arch_random_refill(void)
-{
-	const unsigned int nlongs = 64;	/* Arbitrary number */
-	unsigned int n = 0;
-	unsigned int i;
-	unsigned long buf[nlongs];
-
-	if (!arch_has_random_seed())
-		return 0;
-
-	for (i = 0; i < nlongs; i++) {
-		if (arch_get_random_seed_long(&buf[n]))
-			n++;
-	}
-
-	if (n) {
-		unsigned int rand_bytes = n * sizeof(unsigned long);
-
-		mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
-		credit_entropy_bits(&input_pool, rand_bytes*4);
-	}
-
-	return n;
-}
-
 static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+_random_read(int nonblock, char __user *buf, size_t nbytes)
 {
 	ssize_t n;
 
@@ -1360,12 +1359,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 			return n;
 
 		/* Pool is (near) empty.  Maybe wait and retry. */
-
-		/* First try an emergency refill */
-		if (arch_random_refill())
-			continue;
-
-		if (file->f_flags & O_NONBLOCK)
+		if (nonblock)
 			return -EAGAIN;
 
 		wait_event_interruptible(random_read_wait,
@@ -1376,6 +1370,12 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 	}
 }
 
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
+}
+
 static ssize_t
 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
@@ -1424,7 +1424,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 		count -= bytes;
 		p += bytes;
 
-		mix_pool_bytes(r, buf, bytes, NULL);
+		mix_pool_bytes(r, buf, bytes);
 		cond_resched();
 	}
 
@@ -1520,6 +1520,29 @@ const struct file_operations urandom_fops = {
 	.llseek = noop_llseek,
 };
 
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
+		unsigned int, flags)
+{
+	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
+		return -EINVAL;
+
+	if (count > INT_MAX)
+		count = INT_MAX;
+
+	if (flags & GRND_RANDOM)
+		return _random_read(flags & GRND_NONBLOCK, buf, count);
+
+	if (unlikely(nonblocking_pool.initialized == 0)) {
+		if (flags & GRND_NONBLOCK)
+			return -EAGAIN;
+		wait_event_interruptible(urandom_init_wait,
+					 nonblocking_pool.initialized);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
+	return urandom_read(NULL, buf, count, NULL);
+}
+
 /***************************************************************
  * Random UUID interface
 *
@@ -1663,6 +1686,22 @@ struct ctl_table random_table[] = {
 		.mode		= 0444,
 		.proc_handler	= proc_do_uuid,
 	},
+#ifdef ADD_INTERRUPT_BENCH
+	{
+		.procname	= "add_interrupt_avg_cycles",
+		.data		= &avg_cycles,
+		.maxlen		= sizeof(avg_cycles),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+	{
+		.procname	= "add_interrupt_avg_deviation",
+		.data		= &avg_deviation,
+		.maxlen		= sizeof(avg_deviation),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+#endif
 	{ }
 };
 #endif 	/* CONFIG_SYSCTL */
@@ -1719,3 +1758,23 @@ randomize_range(unsigned long start, unsigned long end, unsigned long len)
 		return 0;
 	return PAGE_ALIGN(get_random_int() % range + start);
 }
+
+/* Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const char *buffer, size_t count,
+				size_t entropy)
+{
+	struct entropy_store *poolp = &input_pool;
+
+	/* Suspend writing if we're above the trickle threshold.
+	 * We'll be woken up again once below random_write_wakeup_thresh,
+	 * or when the calling thread is about to terminate.
+	 */
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+	mix_pool_bytes(poolp, buffer, count);
+	credit_entropy_bits(poolp, entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
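
Taken together, the getrandom(2) paths above give distinct behaviors per flag combination. A hedged userspace sketch exercising them (the GRND_* values are taken from the uapi header this series adds; error handling is trimmed for brevity):

/* Illustrative only: exercising the two getrandom(2) flags.
 * GRND_NONBLOCK and GRND_RANDOM match include/uapi/linux/random.h. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define GRND_NONBLOCK	0x0001
#define GRND_RANDOM	0x0002

int main(void)
{
	unsigned char buf[16];
	long n;

	/* urandom source; EAGAIN instead of blocking if the nonblocking
	 * pool is not yet initialized (only possible at early boot) */
	n = syscall(__NR_getrandom, buf, sizeof(buf), GRND_NONBLOCK);
	if (n < 0 && errno == EAGAIN)
		printf("entropy pool not yet initialized\n");

	/* blocking-pool source; like /dev/random, may return fewer
	 * bytes than requested */
	n = syscall(__NR_getrandom, buf, sizeof(buf), GRND_RANDOM);
	if (n >= 0)
		printf("got %ld bytes from the blocking pool\n", n);
	return 0;
}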
include/linux/hw_random.h  +5 −0
@@ -29,6 +29,8 @@
  * @read:		New API. drivers can fill up to max bytes of data
  *			into the buffer. The buffer is aligned for any type.
  * @priv:		Private data, for use by the RNG driver.
+ * @quality:		Estimation of true entropy in RNG's bitstream
+ *			(per mill).
  */
 struct hwrng {
 	const char *name;
@@ -38,6 +40,7 @@ struct hwrng {
 	int (*data_read)(struct hwrng *rng, u32 *data);
 	int (*read)(struct hwrng *rng, void *data, size_t max, bool wait);
 	unsigned long priv;
+	unsigned short quality;
 
 	/* internal. */
 	struct list_head list;
@@ -47,5 +50,7 @@ struct hwrng {
 extern int hwrng_register(struct hwrng *rng);
 /** Unregister a Hardware Random Number Generator driver. */
 extern void hwrng_unregister(struct hwrng *rng);
+/** Feed random bits into the pool. */
+extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
 
 #endif /* LINUX_HWRANDOM_H_ */
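
For driver authors, setting the new quality field is all that is needed to opt into the filler thread. A hypothetical sketch, not from this commit (demo_rng_read, the "demo" name, and the quality value are invented for illustration):

/* Hypothetical driver sketch: an RNG that khwrngd will poll because it
 * advertises a non-zero quality.  All names and values are illustrative. */
#include <linux/hw_random.h>
#include <linux/module.h>

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* ... fill "data" with up to "max" bytes from the device ... */
	return max;
}

static struct hwrng demo_rng = {
	.name		= "demo",
	.read		= demo_rng_read,
	.quality	= 700,	/* claim 700/1000 bits of entropy per output bit */
};

static int __init demo_rng_init(void)
{
	return hwrng_register(&demo_rng);
}
module_init(demo_rng_init);

static void __exit demo_rng_exit(void)
{
	hwrng_unregister(&demo_rng);
}
module_exit(demo_rng_exit);
MODULE_LICENSE("GPL");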