// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 * - Initialization and readiness waiting.
 * - Fast key erasure RNG, the "crng".
 * - Entropy accumulation and extraction routines.
 * - Entropy collection routines.
 * - Userspace reader/writer interfaces.
 * - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */
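/*
 * A rough sketch of that data flow (illustrative only; the section
 * comments below are authoritative):
 *
 *	entropy sources --> input_pool (BLAKE2s) --> extract_entropy()
 *	                                                   |
 *	                                                   v
 *	                                     base_crng key (ChaCha20)
 *	                                                   |
 *	                                                   v
 *	                       per-cpu crngs --> get_random_bytes(),
 *	                                         /dev/urandom, getrandom(2)
 */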
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/
/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init = CRNG_EMPTY;

#define crng_ready() (likely(crng_init >= CRNG_READY))
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);
/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
static int ratelimit_disable __read_mostly;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);
/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
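/*
 * Example (a minimal sketch, not part of this file): a hypothetical driver
 * that must not generate a key before the RNG is seeded could pair
 * wait_for_random_bytes() with get_random_bytes() like so:
 *
 *	static int example_gen_session_key(u8 key[32])
 *	{
 *		int ret = wait_for_random_bytes();
 *		if (ret)
 *			return ret;	// -ERESTARTSYS if signalled
 *		get_random_bytes(key, 32);
 *		return 0;
 *	}
 */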
/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *          -EALREADY if pool is already initialised (callback not called)
 */
int register_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret = -EALREADY;

	if (crng_ready())
		return ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	if (!crng_ready())
		ret = raw_notifier_chain_register(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
/*
 * Delete a previously registered readiness callback function.
 */
int unregister_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
static void process_random_ready_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}
#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}
/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t nbytes)
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u32, u64, int, and long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/
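/*
 * Illustrative usage of the interfaces above (hypothetical caller, not
 * part of this file): filling a buffer is a single call, while one-off
 * integers should prefer the batched helpers:
 *
 *	u8 nonce[16];
 *	get_random_bytes(nonce, sizeof(nonce));	// like reading /dev/urandom
 *
 *	u32 jitter = get_random_u32() % 100;	// cheap one-off integer
 */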
enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};
/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t nbytes);
/* This extracts a new crng key from the input pool. */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!crng_ready())
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
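/*
 * Illustrative caller pattern (a sketch mirroring the comment above): if
 * the returned state cannot be zeroed promptly, pass &chacha_state[4] as
 * random_data, so the old key copy inside the state is clobbered on return,
 * exactly as get_random_bytes_user() below does:
 *
 *	u32 chacha_state[CHACHA_STATE_WORDS];
 *	crng_fast_key_erasure(key, chacha_state,
 *			      (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
 */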
/*
 * Return whether the crng seed is considered to be sufficiently old
 * that a reseeding is needed. This happens if the last reseeding
 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
 * proportional to the uptime.
 */
static bool crng_has_old_seed(void)
{
	static bool early_boot = true;
	unsigned long interval = CRNG_RESEED_INTERVAL;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
					 (unsigned int)uptime / 2 * HZ);
	}
	return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
}
/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(crng_has_old_seed()))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}
static void _get_random_bytes(void *buf, size_t nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t len;

	if (!nbytes)
		return;

	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}
/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
	size_t len, left, ret = 0;
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!nbytes)
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_user() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (nbytes <= CHACHA_KEY_SIZE) {
		ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, output);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
		left = copy_to_user(buf, output, len);
		if (left) {
			ret += len - left;
			break;
		}

		buf += len;
		ret += len;
		nbytes -= len;
		if (!nbytes)
			break;

		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(output, sizeof(output));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}
/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */
struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;
	unsigned long generation;
	unsigned int position;
};

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
	.position = UINT_MAX
};
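/*
 * Worked sizing example, derived from the formula in the union comment
 * above: with CHACHA_BLOCK_SIZE == 64, the 1.5-block batch is 96 bytes,
 * i.e. per CPU between refills:
 *
 *	96 = (1 + 0.5) * CHACHA_BLOCK_SIZE
 *	ARRAY_SIZE(entropy_u64) == 96 / 8 == 12
 *	ARRAY_SIZE(entropy_u32) == 96 / 4 == 24
 */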
u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	if (!crng_ready()) {
		_get_random_bytes(&ret, sizeof(ret));
		return ret;
	}

	local_lock_irqsave(&batched_entropy_u64.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u64[batch->position];
	batch->entropy_u64[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
	.position = UINT_MAX
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	if (!crng_ready()) {
		_get_random_bytes(&ret, sizeof(ret));
		return ret;
	}

	local_lock_irqsave(&batched_entropy_u32.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u32[batch->position];
	batch->entropy_u32[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif
/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
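/*
 * Example (a hypothetical caller, not from this file): picking a
 * randomized, page-aligned base for a mapping within a 1 GiB window
 * above a minimum address:
 *
 *	unsigned long base = randomize_page(TASK_UNMAPPED_BASE, SZ_1G);
 */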
/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
	size_t left = nbytes;
	u8 *p = buf;

	while (left) {
		unsigned long v;
		size_t chunk = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *	static void mix_pool_bytes(const void *in, size_t nbytes)
 *
 * After which, if added entropy should be credited:
 *
 *	static void credit_init_bits(size_t nbits)
 *
 * Finally, extract entropy via:
 *
 *	static void extract_entropy(void *buf, size_t nbytes)
 *
 **********************************************************************/
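/*
 * The typical internal flow, as a sketch (the "sample" variable is
 * hypothetical): an entropy source mixes raw bytes in, credits however
 * many bits it believes those bytes contain, and consumers later pull
 * extracted output:
 *
 *	mix_pool_bytes(&sample, sizeof(sample));
 *	credit_init_bits(1);			// claim 1 bit of entropy
 *	...
 *	extract_entropy(key, sizeof(key));	// e.g. from crng_reseed()
 */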
enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
static void _mix_pool_bytes(const void *in, size_t nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}
/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}
static void credit_init_bits(size_t nbits)
{
	unsigned int new, orig, add;
	unsigned long flags;

	if (crng_ready() || !nbits)
		return;

	add = min_t(size_t, nbits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.init_bits);
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.init_bits, orig, new) != orig);

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}
/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t size);
 *	void add_hwgenerator_randomness(const void *buffer, size_t count,
 *					size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t size);
 *	void add_vmfork_randomness(const void *unique_vm_id, size_t size);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
 * representing the current instance of a VM to the pool, without crediting,
 * and then force-reseeds the crng so that it takes effect immediately.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/
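/*
 * Example (a hedged sketch of a typical caller, not from this file): a
 * network driver seeding per-device uniqueness with its MAC address at
 * probe time; no entropy is credited:
 *
 *	add_device_randomness(netdev->dev_addr, ETH_ALEN);
 */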
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);
static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	unsigned long flags, entropy = random_get_entropy();

	/*
	 * Encode a representation of how long the system has been suspended,
	 * in a way that is distinct from prior system suspends.
	 */
	ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&action, sizeof(action));
	_mix_pool_bytes(stamps, sizeof(stamps));
	_mix_pool_bytes(&entropy, sizeof(entropy));
	spin_unlock_irqrestore(&input_pool.lock, flags);

	if (crng_ready() && (action == PM_RESTORE_PREPARE ||
	    (action == PM_POST_SUSPEND &&
	     !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) {
		crng_reseed();
		pr_notice("crng reseeded on system resumption\n");
	}
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
 * Depending on the above configuration knob, RDSEED may be considered
 * sufficient for initialization. Note that much earlier setup may already
 * have pushed entropy into the input pool by the time we get here.
 */
int __init rand_initialize(void)
{
	size_t i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;
	unsigned long rv;

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		_mix_pool_bytes(&rv, sizeof(rv));
	}
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));

	if (crng_ready())
		crng_reseed();
	else if (arch_init && trust_cpu)
		credit_init_bits(BLAKE2S_BLOCK_SIZE * 8);

	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}

	WARN_ON(register_pm_notifier(&pm_notifier));

	WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
				    "entropy collection will consequently suffer.");
	return 0;
}
/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, size);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buffer, size_t count,
				size_t entropy)
{
	mix_pool_bytes(buffer, count);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
	 * we're not yet initialized.
	 */
	if (!kthread_should_stop() && crng_ready())
		schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
/*
 * Handle random seed passed by bootloader, and credit it if
 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 */
void add_bootloader_randomness(const void *buf, size_t size)
{
	mix_pool_bytes(buf, size);
	if (trust_bootloader)
		credit_init_bits(size * 8);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);
#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);

/*
 * Handle a new VM ID, which is unique but not secret, so we
 * don't credit it, but we do immediately force a reseed after so
 * that it's used by the crng posthaste.
 */
void add_vmfork_randomness(const void *unique_vm_id, size_t size)
{
	add_device_randomness(unique_vm_id, size);
	if (crng_ready()) {
		crng_reseed();
		pr_notice("crng reseeded due to virtual machine fork\n");
	}
	blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif

int register_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);

int unregister_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
struct fast_pool {
	struct work_struct mix;
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
};

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
#endif
};
/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * four-word SipHash state, while v represents a two-word input.
 */
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}
#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
#endif
static void mix_interrupt_randomness(struct work_struct *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(max(1u, (count & U16_MAX) / 64));

	memzero_explicit(pool, sizeof(pool));
}
void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	if (unlikely(!fast_pool->mix.func))
		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
	fast_pool->count |= MIX_INFLIGHT;
	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
/* There is one of these per entropy source */
struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};
/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_hardirq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which uses a different crediting scheme of 1 bit
	 * per every 64 interrupts. In order to let that function do accounting
	 * close to the one in this function, we credit a full 64/64 bit per bit,
	 * and then subtract one to account for the extra one added.
	 */
	if (in_hardirq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		credit_init_bits(bits);
}
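/*
 * Worked example of the estimate above (illustrative numbers only): if
 * successive deltas work out so that the minimum absolute delta is 10,
 * then the credit is:
 *
 *	bits = min(fls(10 >> 1), 11) = fls(5) = 3
 *
 * i.e. 3 bits of entropy are credited for that event.
 */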
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);
#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif
struct entropy_timer_state {
	unsigned long entropy;
	struct timer_list timer;
	unsigned int samples, samples_per_bit;
};
/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *timer)
{
	struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);

	if (++state->samples == state->samples_per_bit) {
		credit_init_bits(1);
		state->samples = 0;
	}
}
/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void try_to_generate_entropy(void)
{
	enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
	struct entropy_timer_state stack;
	unsigned int i, num_different = 0;
	unsigned long last = random_get_entropy();

	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
		stack.entropy = random_get_entropy();
		if (stack.entropy != last)
			++num_different;
		last = stack.entropy;
	}
	stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
	if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
		return;

	stack.samples = 0;
	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready() && !signal_pending(current)) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
		schedule();
		stack.entropy = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}
/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/
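/*
 * Userspace example (a sketch, not part of this file): the recommended
 * way to obtain random bytes is the getrandom(2) syscall, here via the
 * glibc wrapper; with flags=0 it blocks until the RNG is seeded:
 *
 *	#include <sys/random.h>
 *
 *	unsigned char key[32];
 *	ssize_t n = getrandom(key, sizeof(key), 0);
 *	if (n != (ssize_t)sizeof(key))
 *		;	// handle error
 */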
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
		flags)
{
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		int ret;

		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return get_random_bytes_user(buf, count);
}
static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
static int write_pool(const char __user *ubuf, size_t count)
{
	size_t len;
	int ret = 0;
	u8 block[BLAKE2S_BLOCK_SIZE];

	while (count) {
		len = min(count, sizeof(block));
		if (copy_from_user(block, ubuf, len)) {
			ret = -EFAULT;
			goto out;
		}
		count -= len;
		ubuf += len;
		mix_pool_bytes(block, len);
		cond_resched();
	}

out:
	memzero_explicit(block, sizeof(block));
	return ret;
}
static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	int ret;

	ret = write_pool(buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}
static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
			    loff_t *ppos)
{
	static int maxwarn = 10;

	/*
	 * Opportunistically attempt to initialize the RNG on platforms that
	 * have fast cycle counters, but don't (for now) require it to succeed.
	 */
	if (!crng_ready())
		try_to_generate_entropy();

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
	}

	return get_random_bytes_user(buf, nbytes);
}
static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
			   loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(buf, nbytes);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool((const char __user *)p, size);
		if (retval < 0)
			return retval;
		credit_init_bits(ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_READY_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/
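/*
 * Userspace example (a sketch): these knobs are plain files, so reading
 * the current entropy count is an ordinary open/read:
 *
 *	int fd = open("/proc/sys/kernel/random/entropy_avail", O_RDONLY);
 *	char buf[16];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	// e.g. "256\n"
 */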
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];
/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
}
/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
}
static struct ctl_table random_table[] = {
	{
		.procname = "poolsize",
		.data = &sysctl_poolsize,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "entropy_avail",
		.data = &input_pool.init_bits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "write_wakeup_threshold",
		.data = &sysctl_random_write_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "urandom_min_reseed_secs",
		.data = &sysctl_random_min_urandom_seed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "boot_id",
		.data = &sysctl_bootid,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{
		.procname = "uuid",
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{ }
};
/*
 * rand_initialize() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in rand_initialize().
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif