Lines Matching +full:reseed +full:- +full:disable

1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
3 * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
10 * - Initialization and readiness waiting.
11 * - Fast key erasure RNG, the "crng".
12 * - Entropy accumulation and extraction routines.
13 * - Entropy collection routines.
14 * - Userspace reader/writer interfaces.
15 * - Sysctl interface.
81 * crng_init is protected by base_crng->lock, and only increases
82 * its value (from empty->early->ready).
91 /* Various types of waiters for crng_init->CRNG_READY transition. */
102 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
135 * -ERESTARTSYS if the function was interrupted by a signal.
164 nb->notifier_call(nb, 0, NULL); in execute_with_initialized_rng()
183 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
200 * functions may be higher performance for one-off random integers,
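The comment fragments above reference the "fast key erasure" RNG (line 183). As a rough illustration of that construction, and not the kernel's crng_fast_key_erasure() verbatim, the following sketch assumes a hypothetical chacha20_expand() that turns a 32-byte key into one 64-byte ChaCha20 keystream block:

#include <stdint.h>
#include <string.h>

/* Hypothetical primitive: expand a 32-byte key into one 64-byte ChaCha20 block. */
void chacha20_expand(const uint8_t key[32], uint8_t block[64]);

/*
 * Fast key erasure: generate one block, immediately overwrite the key with
 * the first 32 bytes of it, and serve the caller from the remainder. Because
 * the previous key is destroyed before any output leaves this function, a
 * later compromise of the state cannot reveal earlier outputs.
 */
static void fast_key_erasure(uint8_t key[32], uint8_t *out, size_t out_len)
{
	uint8_t block[64];

	if (out_len > 32)
		out_len = 32;	/* at most 32 bytes per ratchet in this sketch */

	chacha20_expand(key, block);
	memcpy(key, block, 32);			/* ratchet: the old key is gone */
	memcpy(out, block + 32, out_len);	/* output comes from the tail */
	memset(block, 0, sizeof(block));	/* the kernel wipes with memzero_explicit() */
}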
270 * because the per-cpu crngs are initialized to ULONG_MAX, so this in crng_reseed()
286 * Cast to unsigned long for 32-bit architectures, since atomic 64-bit in crng_reseed()
288 * because base_crng.generation is a 32-bit value. On big-endian in crng_reseed()
293 smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1); in crng_reseed()
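The crng_reseed() fragments above (lines 270-293) revolve around the base generation counter. A minimal sketch of the bump, assuming the same sentinel convention (per-CPU crngs start at ULONG_MAX so freshly onlined CPUs always resync); locking and the vDSO mirror are omitted:

#include <limits.h>

static unsigned long base_generation;	/* stand-in for base_crng.generation */

static void bump_generation(void)
{
	unsigned long next_gen = base_generation + 1;

	/* Never land on ULONG_MAX: that value means "stale" to per-CPU crngs. */
	if (next_gen == ULONG_MAX)
		++next_gen;
	base_generation = next_gen;	/* the kernel uses WRITE_ONCE() under base_crng.lock */
}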
348 * then re-check once locked later. In the case where we're really not in crng_make_state()
372 * If our per-cpu crng is older than the base_crng, then it means in crng_make_state()
375 * for our per-cpu crng. This brings us up to date with base_crng. in crng_make_state()
377 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { in crng_make_state()
380 crng->key, sizeof(crng->key)); in crng_make_state()
381 crng->generation = base_crng.generation; in crng_make_state()
386 * Finally, when we've made it this far, our per-cpu crng has an up in crng_make_state()
392 crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); in crng_make_state()
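Putting the crng_make_state() fragments together: when the per-CPU generation lags base_crng, the per-CPU key is re-derived from the base key (which is itself ratcheted in the process), and only then is output produced from the per-CPU key. A compact sketch reusing the fast_key_erasure() helper sketched earlier; the real function also hands back the ChaCha state so longer requests can continue from it:

struct crng_sketch {
	uint8_t key[32];
	unsigned long generation;
};

static void make_state(struct crng_sketch *local, struct crng_sketch *base,
		       uint8_t *out, size_t out_len)
{
	/* Catch up with any reseed since this CPU last produced output. */
	if (local->generation != base->generation) {
		/* base_crng.lock is held here in the kernel */
		fast_key_erasure(base->key, local->key, sizeof(local->key));
		local->generation = base->generation;
	}

	/* The per-CPU key is now current; ratchet it and serve the caller. */
	fast_key_erasure(local->key, out, out_len);
}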
407 len -= first_block_len; in _get_random_bytes()
421 len -= CHACHA_BLOCK_SIZE; in _get_random_bytes()
488 return ret ? ret : -EFAULT; in get_random_bytes_user()
536 if (batch->position >= ARRAY_SIZE(batch->entropy) || \
537 next_gen != batch->generation) { \
538 _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
539 batch->position = 0; \
540 batch->generation = next_gen; \
543 ret = batch->entropy[batch->position]; \
544 batch->entropy[batch->position] = 0; \
545 ++batch->position; \
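The macro body excerpted at lines 536-545 implements per-CPU batching: a small buffer of pre-generated words is consumed one element at a time, zeroed as it is handed out, and refilled when it runs dry or when the crng generation changes. A standalone approximation, with a hypothetical fill_random() standing in for _get_random_bytes():

#include <stdint.h>
#include <string.h>

void fill_random(void *buf, size_t len);	/* hypothetical entropy source */

struct batch_u64 {
	uint64_t entropy[16];
	unsigned int position;
	unsigned long generation;
};

static uint64_t batched_get_u64(struct batch_u64 *batch, unsigned long current_gen)
{
	uint64_t ret;

	/* Refill when drained, or when a reseed has invalidated the cached words. */
	if (batch->position >= sizeof(batch->entropy) / sizeof(batch->entropy[0]) ||
	    batch->generation != current_gen) {
		fill_random(batch->entropy, sizeof(batch->entropy));
		batch->position = 0;
		batch->generation = current_gen;
	}

	ret = batch->entropy[batch->position];
	batch->entropy[batch->position] = 0;	/* erase what was handed out */
	++batch->position;
	return ret;
}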
564 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable in DEFINE_BATCHED_ENTROPY()
565 * in 32-bits. in DEFINE_BATCHED_ENTROPY()
572 * for the non-underscored constant version in the header, we build bug in DEFINE_BATCHED_ENTROPY()
573 * on that. But for the non-constant case, it's convenient to have that in DEFINE_BATCHED_ENTROPY()
583 u32 bound = -ceil % ceil; in DEFINE_BATCHED_ENTROPY()
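The `-ceil % ceil` comment (lines 564-583) describes the rejection bound used for uniform integers below a non-constant ceiling. A self-contained sketch of the multiply-and-reject technique, with get_u32() as a hypothetical stand-in for get_random_u32():

#include <stdint.h>

uint32_t get_u32(void);	/* hypothetical uniform 32-bit source */

/* Returns a uniformly distributed value in [0, ceil) for ceil >= 1. */
static uint32_t random_below(uint32_t ceil)
{
	/*
	 * The high half of a 32x32->64 multiply maps a uniform 32-bit word
	 * into [0, ceil) almost uniformly; only lower halves smaller than
	 * 2^32 % ceil are biased. Since 2^32 does not fit in 32 bits,
	 * -ceil % ceil computes the same bound. The cheap comparison against
	 * ceil itself avoids the division in the common case.
	 */
	uint64_t mult = (uint64_t)ceil * get_u32();

	if ((uint32_t)mult < ceil) {
		uint32_t bound = -ceil % ceil;

		while ((uint32_t)mult < bound)
			mult = (uint64_t)ceil * get_u32();
	}
	return mult >> 32;
}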
600 * the per-cpu crng and all batches, so that we serve fresh in random_prepare_cpu()
603 per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; in random_prepare_cpu()
604 per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX; in random_prepare_cpu()
605 per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX; in random_prepare_cpu()
606 per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; in random_prepare_cpu()
607 per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; in random_prepare_cpu()
633 POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
634 POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
669 * This is an HKDF-like construction for using the hashed collected entropy
670 * as a PRF key, that's then expanded block-by-block.
683 longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i); in extract_entropy()
688 longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i); in extract_entropy()
714 len -= i; in extract_entropy()
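The extract_entropy() fragments above show the HKDF-like shape mentioned at lines 669-670: hash the pool into a seed, derive a fresh pool key from that seed, then expand output block by block, with each block also covering a buffer of RDSEED/RDRAND words and a counter. A simplified standalone sketch, using a hypothetical keyed hash prf() where the kernel uses BLAKE2s:

#include <stdint.h>
#include <string.h>

/* Hypothetical keyed hash producing 32 bytes: out = H(key, msg). */
void prf(uint8_t out[32], const uint8_t key[32], const void *msg, size_t msg_len);

static void extract_expand(uint8_t pool_key[32],
			   const void *pool_input, size_t pool_input_len,
			   const uint8_t rdseed[32],
			   uint8_t *out, size_t out_len)
{
	struct {
		uint8_t rdseed[32];
		size_t counter;
	} block;
	uint8_t seed[32], tmp[32];

	memcpy(block.rdseed, rdseed, sizeof(block.rdseed));

	/* Extract: seed = H(previous pool key, accumulated input). */
	prf(seed, pool_key, pool_input, pool_input_len);

	/* Re-key the pool with counter 0, so past pool state cannot be recovered. */
	block.counter = 0;
	prf(pool_key, seed, &block, sizeof(block));

	/* Expand: block_i = H(seed, rdseed || counter=i), for i = 1, 2, ... */
	while (out_len) {
		size_t n = out_len < 32 ? out_len : 32;

		++block.counter;
		prf(tmp, seed, &block, sizeof(block));
		memcpy(out, tmp, n);
		out += n;
		out_len -= n;
	}

	memset(seed, 0, sizeof(seed));	/* the kernel wipes with memzero_explicit() */
}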
746 WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true); in _credit_init_bits()
784 * read-out of the RTC. This does *not* credit any actual entropy to
799 * and then force-reseeds the crng so that it takes effect immediately.
810 * layer request events, on a per-disk_devt basis, as input to the
811 * entropy pool. Note that high-speed solid state drives with very low
876 longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i); in random_init_early()
882 longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i); in random_init_early()
888 arch_bits -= sizeof(*entropy) * 8; in random_init_early()
895 /* Reseed if already seeded by earlier phases. */ in random_init_early()
923 /* Reseed if already seeded by earlier phases. */ in random_init()
934 * Add device- or boot-specific data to the input pool to help
954 * Interface for in-kernel drivers of true hardware RNGs. Those devices
964 * Throttle writing to once every reseed interval, unless we're not yet in add_hwgenerator_randomness()
988 * don't credit it, but we do immediately force a reseed after so
1038 * This is [Half]SipHash-1-x, starting from an empty key. Because
1039 * the key is fixed, it assumes that its inputs are non-malicious,
1041 * four-word SipHash state, while v represents a two-word input.
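The comment above (lines 1038-1041) describes the interrupt mixer as SipHash-1-x (HalfSipHash-1-x on 32-bit). The 64-bit flavor can be sketched as follows; this is the standard SipHash round used purely as a fast diffusion function over a fixed initial state, not as a keyed MAC:

#include <stdint.h>

static inline uint64_t rol64(uint64_t w, unsigned int s)
{
	return (w << s) | (w >> (64 - s));
}

/* One SipHash round over a four-word state (the "1" in SipHash-1-x). */
#define SIPROUND(a, b, c, d) do {						\
	(a) += (b); (b) = rol64((b), 13); (b) ^= (a); (a) = rol64((a), 32);	\
	(c) += (d); (d) = rol64((d), 16); (d) ^= (c);				\
	(a) += (d); (d) = rol64((d), 21); (d) ^= (a);				\
	(c) += (b); (b) = rol64((b), 17); (b) ^= (c); (c) = rol64((c), 32);	\
} while (0)

/*
 * Mix two input words into the state with one round per word, in the spirit
 * of the fast_mix() described above: absorb into one lane, permute, then
 * fold the same word into another lane, which is the SipHash message
 * absorption pattern.
 */
static void fast_mix_sketch(uint64_t s[4], uint64_t v1, uint64_t v2)
{
	s[3] ^= v1;
	SIPROUND(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	SIPROUND(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}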
1071 per_cpu_ptr(&irq_randomness, cpu)->count = 0; in random_online_cpu()
1100 memcpy(pool, fast_pool->pool, sizeof(pool)); in mix_interrupt_randomness()
1101 count = fast_pool->count; in mix_interrupt_randomness()
1102 fast_pool->count = 0; in mix_interrupt_randomness()
1103 fast_pool->last = jiffies; in mix_interrupt_randomness()
1120 fast_mix(fast_pool->pool, entropy, in add_interrupt_randomness()
1122 new_count = ++fast_pool->count; in add_interrupt_randomness()
1127 if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) in add_interrupt_randomness()
1130 fast_pool->count |= MIX_INFLIGHT; in add_interrupt_randomness()
1131 if (!timer_pending(&fast_pool->mix)) { in add_interrupt_randomness()
1132 fast_pool->mix.expires = jiffies; in add_interrupt_randomness()
1133 add_timer_on(&fast_pool->mix, raw_smp_processor_id()); in add_interrupt_randomness()
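Lines 1120-1133 show the hand-off decision in add_interrupt_randomness(): cheap mixing on every interrupt, heavier processing deferred until roughly 1024 events or one second have passed, with a flag bit preventing double-queuing. A policy-only sketch (the per-CPU timer and workqueue plumbing, and the kernel's time_is_before_jiffies() test, are left out); it reuses fast_mix_sketch() from above:

#define MIX_INFLIGHT	(1U << 31)
#define TICKS_PER_SEC	250	/* stand-in for HZ */

struct fast_pool_sketch {
	uint64_t pool[4];
	unsigned long last;	/* tick of the last hand-off */
	unsigned int count;
};

/* Returns nonzero when the caller should schedule deferred mixing. */
static int note_interrupt(struct fast_pool_sketch *fp, uint64_t cycles,
			  uint64_t return_ip, unsigned long now)
{
	unsigned int new_count;

	fast_mix_sketch(fp->pool, cycles, return_ip);
	new_count = ++fp->count;

	if (new_count & MIX_INFLIGHT)	/* a hand-off is already queued */
		return 0;
	if (new_count < 1024 && now - fp->last < TICKS_PER_SEC)
		return 0;		/* not enough samples and not enough time */

	fp->count |= MIX_INFLIGHT;	/* cleared again once the worker drains the pool */
	return 1;
}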
1162 fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); in add_timer_randomness()
1175 * We take into account the first, second and third-order deltas in add_timer_randomness()
1178 delta = now - READ_ONCE(state->last_time); in add_timer_randomness()
1179 WRITE_ONCE(state->last_time, now); in add_timer_randomness()
1181 delta2 = delta - READ_ONCE(state->last_delta); in add_timer_randomness()
1182 WRITE_ONCE(state->last_delta, delta); in add_timer_randomness()
1184 delta3 = delta2 - READ_ONCE(state->last_delta2); in add_timer_randomness()
1185 WRITE_ONCE(state->last_delta2, delta2); in add_timer_randomness()
1188 delta = -delta; in add_timer_randomness()
1190 delta2 = -delta2; in add_timer_randomness()
1192 delta3 = -delta3; in add_timer_randomness()
1212 this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; in add_timer_randomness()
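The add_timer_randomness() fragments above compute first-, second- and third-order deltas of the event time and credit entropy based on the smallest of their absolute values. A standalone sketch of that heuristic (fls() is open-coded for portability; the kernel caps the estimate at 11 bits):

struct timer_rand_sketch {
	long last_time, last_delta, last_delta2;
};

static unsigned int fls_sketch(unsigned long x)	/* index of the highest set bit, 1-based */
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		++r;
	}
	return r;
}

static unsigned int estimate_bits(struct timer_rand_sketch *s, long now)
{
	long delta = now - s->last_time;
	long delta2, delta3;
	unsigned int bits;

	s->last_time = now;

	delta2 = delta - s->last_delta;
	s->last_delta = delta;

	delta3 = delta2 - s->last_delta2;
	s->last_delta2 = delta2;

	/* Work with magnitudes and keep the smallest of the three. */
	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/* Round down by one bit and cap the credit at 11 bits. */
	bits = fls_sketch((unsigned long)delta >> 1);
	return bits > 11 ? 11 : bits;
}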
1235 if (!disk || !disk->random) in add_disk_randomness()
1238 add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); in add_disk_randomness()
1252 state->last_time = INITIAL_JIFFIES; in rand_initialize_disk()
1253 disk->random = state; in rand_initialize_disk()
1270 * Note that we don't re-arm the timer in the timer itself - we are happy to be
1274 * So the re-arming always happens in the entropy loop itself.
1282 if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0) in entropy_timer()
1293 u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1]; in try_to_generate_entropy()
1297 int cpu = -1; in try_to_generate_entropy()
1299 for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) { in try_to_generate_entropy()
1300 stack->entropy = random_get_entropy(); in try_to_generate_entropy()
1301 if (stack->entropy != last) in try_to_generate_entropy()
1303 last = stack->entropy; in try_to_generate_entropy()
1305 stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1); in try_to_generate_entropy()
1306 if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT) in try_to_generate_entropy()
1309 atomic_set(&stack->samples, 0); in try_to_generate_entropy()
1310 timer_setup_on_stack(&stack->timer, entropy_timer, 0); in try_to_generate_entropy()
1316 if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) { in try_to_generate_entropy()
1335 /* Basic CPU round-robin, which avoids the current CPU. */ in try_to_generate_entropy()
1343 stack->timer.expires = jiffies; in try_to_generate_entropy()
1345 add_timer_on(&stack->timer, cpu); in try_to_generate_entropy()
1349 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); in try_to_generate_entropy()
1351 stack->entropy = random_get_entropy(); in try_to_generate_entropy()
1353 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); in try_to_generate_entropy()
1355 del_timer_sync(&stack->timer); in try_to_generate_entropy()
1356 destroy_timer_on_stack(&stack->timer); in try_to_generate_entropy()
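The try_to_generate_entropy() fragments describe the last-resort jitter collector: a timer repeatedly armed to fire "now" on a different CPU while this CPU spins sampling the cycle counter, so that scheduling and interrupt jitter make the samples diverge. The following is a conceptual sketch only; every helper below is hypothetical (the kernel uses timer_setup_on_stack(), add_timer_on(), random_get_entropy() and mix_pool_bytes()), and, as the excerpted comment notes, re-arming happens in the loop, never inside the timer callback:

int rng_is_ready(void);				/* hypothetical */
unsigned long cycle_counter(void);		/* hypothetical */
void mix_sample(unsigned long sample);		/* hypothetical */
int timer_is_pending(void);			/* hypothetical */
void arm_timer_now_on_other_cpu(void);		/* hypothetical: runs timer_fired() elsewhere */

static unsigned long sample;

static void timer_fired(void)
{
	/* Runs on another CPU: clobber the sample mid-loop and mix it in. */
	sample = cycle_counter();
	mix_sample(sample);
	/* The real callback also credits one bit per samples_per_bit firings. */
}

static void generate_jitter_entropy(void)
{
	while (!rng_is_ready()) {
		if (!timer_is_pending())
			arm_timer_now_on_other_cpu();	/* re-armed here, never in timer_fired() */
		sample = cycle_counter();
		mix_sample(sample);
	}
}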
1394 return -EINVAL; in SYSCALL_DEFINE3()
1401 return -EINVAL; in SYSCALL_DEFINE3()
1405 return -EAGAIN; in SYSCALL_DEFINE3()
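The three error returns excerpted from the getrandom() syscall (lines 1394-1405) correspond to flag validation and the non-blocking early exit. A sketch of that ordering; GRND_NONBLOCK, GRND_RANDOM and GRND_INSECURE are the real uapi flags, while the helper itself is illustrative:

#include <errno.h>
#include <linux/random.h>	/* GRND_NONBLOCK, GRND_RANDOM, GRND_INSECURE */

static int getrandom_flag_checks(unsigned int flags, int crng_is_ready)
{
	/* Reject unknown flags. */
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/* "Insecure" and "blocking" randomness together make no sense. */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	/* Not yet seeded: non-blocking callers get -EAGAIN instead of sleeping. */
	if (!crng_is_ready && !(flags & GRND_INSECURE) && (flags & GRND_NONBLOCK))
		return -EAGAIN;

	return 0;	/* otherwise wait for readiness if needed, then copy out bytes */
}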
1448 return ret ? ret : -EFAULT; in write_pool_user()
1471 --maxwarn; in urandom_read_iter()
1473 current->comm, iov_iter_count(iter)); in urandom_read_iter()
1485 ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) || in random_read_iter()
1486 (kiocb->ki_filp->f_flags & O_NONBLOCK))) in random_read_iter()
1487 return -EAGAIN; in random_read_iter()
1504 return -EFAULT; in random_ioctl()
1508 return -EPERM; in random_ioctl()
1510 return -EFAULT; in random_ioctl()
1512 return -EINVAL; in random_ioctl()
1521 return -EPERM; in random_ioctl()
1523 return -EFAULT; in random_ioctl()
1525 return -EINVAL; in random_ioctl()
1527 return -EFAULT; in random_ioctl()
1536 return -EFAULT; in random_ioctl()
1544 return -EPERM; in random_ioctl()
1548 return -EPERM; in random_ioctl()
1550 return -ENODATA; in random_ioctl()
1554 return -EINVAL; in random_ioctl()
1595 * - boot_id - a UUID representing the current boot.
1597 * - uuid - a random UUID, different each time the file is read.
1599 * - poolsize - the number of bits of entropy that the input pool can
1602 * - entropy_avail - the number of bits of entropy currently in the
1605 * - write_wakeup_threshold - the amount of entropy in the input pool
1611 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1628 * UUID. The difference is in whether table->data is NULL; if it is,
1642 return -EPERM; in proc_do_uuid()
1644 uuid = table->data; in proc_do_uuid()