Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 39a22213 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "random: convert get_random_int/long into get_random_u32/u64"

parents c177dfdc 1ac040b1
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -383,9 +383,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
{
	unsigned long res = n;
	kasan_check_write(to, n);
	check_object_size(to, n, false);

	if (access_ok(VERIFY_READ, from, n)) {
		check_object_size(to, n, false);
		res = __arch_copy_from_user(to, from, n);
	}
	if (unlikely(res))
@@ -396,9 +396,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
+27 −28
Original line number Diff line number Diff line
@@ -2044,8 +2044,8 @@ struct ctl_table random_table[] = {

/*
 * Per-CPU cache of extracted CRNG output. One ChaCha20 block is drawn at a
 * time and handed out in u64- or u32-sized chunks; `position` indexes the
 * next unread element of whichever union member is in use.
 */
struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
};
@@ -2055,52 +2055,51 @@ struct batched_entropy {
 * number is either as good as RDRAND or as good as /dev/urandom, with the
 * goal of being quite fast and not depleting entropy.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
unsigned long get_random_long(void)
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
	unsigned long ret;
	u64 ret;
	struct batched_entropy *batch;

	if (arch_get_random_long(&ret))
#if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
		return ret;
#else
	if (arch_get_random_long((unsigned long *)&ret) &&
	    arch_get_random_long((unsigned long *)&ret + 1))
	    return ret;
#endif

	batch = &get_cpu_var(batched_entropy_long);
	if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
		extract_crng((u8 *)batch->entropy_long);
	batch = &get_cpu_var(batched_entropy_u64);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_long[batch->position++];
	put_cpu_var(batched_entropy_long);
	ret = batch->entropy_u64[batch->position++];
	put_cpu_var(batched_entropy_u64);
	return ret;
}
EXPORT_SYMBOL(get_random_long);
EXPORT_SYMBOL(get_random_u64);

#if BITS_PER_LONG == 32
unsigned int get_random_int(void)
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
	return get_random_long();
}
#else
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
unsigned int get_random_int(void)
{
	unsigned int ret;
	u32 ret;
	struct batched_entropy *batch;

	if (arch_get_random_int(&ret))
		return ret;

	batch = &get_cpu_var(batched_entropy_int);
	if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
		extract_crng((u8 *)batch->entropy_int);
	batch = &get_cpu_var(batched_entropy_u32);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_int[batch->position++];
	put_cpu_var(batched_entropy_int);
	ret = batch->entropy_u32[batch->position++];
	put_cpu_var(batched_entropy_u32);
	return ret;
}
#endif
EXPORT_SYMBOL(get_random_int);
EXPORT_SYMBOL(get_random_u32);

/**
 * randomize_page - Generate a random, page aligned address
+15 −2
Original line number Diff line number Diff line
@@ -42,8 +42,21 @@ extern void get_random_bytes_arch(void *buf, int nbytes);
extern const struct file_operations random_fops, urandom_fops;
#endif

u32 get_random_u32(void);
u64 get_random_u64(void);

/*
 * Backward-compatible wrappers: the implementation now provides fixed-width
 * get_random_u32()/get_random_u64(); existing callers of get_random_int()
 * and get_random_long() are routed to the width-appropriate variant.
 */
static inline unsigned int get_random_int(void)
{
	return get_random_u32();
}
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}

unsigned long randomize_page(unsigned long start, unsigned long range);

u32 prandom_u32(void);
+12 −5
Original line number Diff line number Diff line
@@ -1517,6 +1517,18 @@ static __latent_entropy struct task_struct *copy_process(
	if (!p)
		goto fork_out;

	/*
	 * This _must_ happen before we call free_task(), i.e. before we jump
	 * to any of the bad_fork_* labels. This is to avoid freeing
	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
	 * kernel threads (PF_KTHREAD).
	 */
	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);
@@ -1678,11 +1690,6 @@ static __latent_entropy struct task_struct *copy_process(
		}
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif