
Commit 5ad9345d authored by Linus Torvalds

Merge tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull randomness fixes from Ted Ts'o:
 "Improve performance by using a lockless update mechanism suggested by
  Linus, and make sure we refresh the per-CPU entropy returned by get_random_*
  as soon as the CRNG is initialized"

* tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: invalidate batched entropy after crng init
  random: use lockless method of accessing and updating f->reg_idx
parents 5e38b72a b169c13d
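
As a reading aid for the diff below, here is a minimal user-space sketch of the lockless index-update pattern the f->reg_idx patch adopts: take a local snapshot of the index with READ_ONCE(), bound it, and publish the new value with WRITE_ONCE() instead of disabling interrupts around the whole update. This is an illustration only, not kernel code; READ_ONCE/WRITE_ONCE are approximated with volatile accesses, and the names fake_pool, regs and get_reg_sketch are invented for the example.

#include <stdio.h>

/* User-space stand-ins for the kernel's READ_ONCE/WRITE_ONCE (from
 * <linux/compiler.h>): force a single access through a volatile lvalue. */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

struct fake_pool {
	unsigned int reg_idx;		/* cursor into the register dump */
};

static unsigned int regs[16];		/* stand-in for struct pt_regs */

static unsigned int get_reg_sketch(struct fake_pool *f)
{
	unsigned int idx = READ_ONCE(f->reg_idx);	/* one racy load, no lock */

	if (idx >= sizeof(regs) / sizeof(regs[0]))
		idx = 0;				/* wrap around the dump */
	WRITE_ONCE(f->reg_idx, idx + 1);		/* racy store; any winner is fine */
	return regs[idx];
}

int main(void)
{
	struct fake_pool f = { .reg_idx = 0 };

	for (int i = 0; i < 20; i++)
		printf("reg %d: %u\n", i, get_reg_sketch(&f));
	return 0;
}

The race this tolerates is deliberate: two contexts can read the same index or publish out of order, but any value of reg_idx still selects some saved register, so correctness does not depend on the index being exact and the local_irq_save()/restore() pair can be dropped.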
+43 −6
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int		i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
-	unsigned long flags;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	local_irq_save(flags);
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	ptr += f->reg_idx++;
-	local_irq_restore(flags);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
 	return *ptr;
 }
 
@@ -2023,6 +2030,7 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.
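
For the first patch in the pull, the following self-contained pthreads sketch may help make the gating visible: consumers of a per-CPU batch take a read lock only while the generator is still uninitialized, and invalidation resets every batch position under the write lock so that words extracted before initialization are thrown away rather than handed out. This is an illustration under invented names, not the kernel implementation: fill_batch(), BATCH_WORDS, NCPU and the crng_ready flag do not exist in random.c, and rand() stands in for extract_crng().

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPU		4	/* pretend CPU count */
#define BATCH_WORDS	16	/* words extracted per refill */

struct batched_entropy {
	uint64_t entropy[BATCH_WORDS];
	unsigned int position;
};

static struct batched_entropy batches[NCPU];
static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool crng_ready;		/* stands in for crng_init >= 2 */

/* Refill a batch; the real code pulls from the CRNG via extract_crng(). */
static void fill_batch(struct batched_entropy *b)
{
	for (unsigned int i = 0; i < BATCH_WORDS; i++)
		b->entropy[i] = ((uint64_t)rand() << 32) | (uint64_t)rand();
}

static uint64_t get_batched_u64(int cpu)
{
	struct batched_entropy *b = &batches[cpu];
	bool use_lock = !crng_ready;	/* lock only before init, as in the patch */
	uint64_t ret;

	if (use_lock)
		pthread_rwlock_rdlock(&reset_lock);
	if (b->position % BATCH_WORDS == 0) {	/* batch empty or invalidated */
		fill_batch(b);
		b->position = 0;
	}
	ret = b->entropy[b->position++];
	if (use_lock)
		pthread_rwlock_unlock(&reset_lock);
	return ret;
}

/* Lazy invalidation: reset every position so the next use refills.  The
 * ready flag is flipped here for brevity; the kernel sets crng_init in
 * crng_fast_load()/crng_reseed() right after calling the invalidator. */
static void invalidate_batched_entropy(void)
{
	pthread_rwlock_wrlock(&reset_lock);
	for (int cpu = 0; cpu < NCPU; cpu++)
		batches[cpu].position = 0;
	crng_ready = true;
	pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
	printf("pre-init word:  %llx\n", (unsigned long long)get_batched_u64(0));
	invalidate_batched_entropy();
	printf("post-init word: %llx\n", (unsigned long long)get_batched_u64(0));
	return 0;
}

Resetting position to 0 is sufficient because the refill branch fires whenever position % BATCH_WORDS == 0, which is the lazy invalidation described in the comment above invalidate_batched_entropy() in the diff; once the generator is initialized the read lock is skipped entirely, which is where the performance win comes from.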