Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9d436efe authored by Jason A. Donenfeld; committed by Greg Kroah-Hartman
Browse files

random: tie batched entropy generation to base_crng generation



commit 0791e8b655cc373718f0f58800fdc625a3447ac5 upstream.

Now that we have an explicit base_crng generation counter, we don't need
a separate one for batched entropy. Rather, we can just move the
generation forward every time we change crng_init state or update the
base_crng key.

Cc: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c1fc9536
Loading
Loading
Loading
Loading
+8 −21
Original line number Diff line number Diff line
@@ -429,8 +429,6 @@ static DEFINE_PER_CPU(struct crng, crngs) = {

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

static void invalidate_batched_entropy(void);

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally. Returns the number of
@@ -453,7 +451,7 @@ static size_t crng_fast_load(const void *cp, size_t len)
		src++; crng_init_cnt++; len--; ret++;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		++base_crng.generation;
		crng_init = 1;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -581,7 +579,6 @@ static void crng_reseed(void)
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		finalize_init = true;
	}
@@ -1306,8 +1303,9 @@ int __init rand_initialize(void)
	mix_pool_bytes(utsname(), sizeof(*(utsname())));

	extract_entropy(base_crng.key, sizeof(base_crng.key));
	++base_crng.generation;

	if (arch_init && trust_cpu && crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}
@@ -1645,8 +1643,6 @@ struct ctl_table random_table[] = {
};
#endif	/* CONFIG_SYSCTL */

static atomic_t batch_generation = ATOMIC_INIT(0);

struct batched_entropy {
	union {
		/*
@@ -1659,8 +1655,8 @@ struct batched_entropy {
		u64 entropy_u64[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	unsigned long generation;
	unsigned int position;
	int generation;
};

/*
@@ -1679,14 +1675,14 @@ u64 get_random_u64(void)
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	int next_gen;
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_irq_save(flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	next_gen = atomic_read(&batch_generation);
	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
@@ -1712,14 +1708,14 @@ u32 get_random_u32(void)
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	int next_gen;
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_irq_save(flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	next_gen = atomic_read(&batch_generation);
	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
@@ -1735,15 +1731,6 @@ u32 get_random_u32(void)
}
EXPORT_SYMBOL(get_random_u32);

/* It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * bumping the generation counter.
 */
static void invalidate_batched_entropy(void)
{
	atomic_inc(&batch_generation);
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.