Commit ba6ef8af authored by Linus Torvalds

Merge tag 'random-5.17-rc3-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull random number generator fixes from Jason Donenfeld:
 "For this week, we have:

   - A fix to make more frequent use of hwgenerator randomness, from
     Dominik.

   - More cleanups to the boot initialization sequence, from Dominik.

   - A fix for an old shortcoming with the ZAP ioctl, from me.

   - A workaround for a still unfixed Clang CFI/FullLTO compiler bug,
     from me. On one hand, it's a bummer to commit workarounds for
     experimental compiler features that have bugs. But on the other, I
     think this actually improves the code somewhat, independent of the
     bug. So a win-win"

* tag 'random-5.17-rc3-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  random: only call crng_finalize_init() for primary_crng
  random: access primary_pool directly rather than through pointer
  random: wake up /dev/random writers after zap
  random: continually use hwgenerator randomness
  lib/crypto: blake2s: avoid indirect calls to compression function for Clang CFI
parents ddb16b08 9d5505f1
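
Since the pull message only gestures at what the Clang CFI workaround looks like, here is a minimal, self-contained userspace sketch of the pattern used in the blake2s hunks below: the function-pointer parameter becomes a bool, so every call to the compression routine is a direct call that CFI/FullLTO never has to instrument. All names in the sketch are illustrative, not the kernel's.

/*
 * Illustrative sketch only, not kernel code: instead of passing a
 * compression function pointer (an indirect call), callers pass a bool
 * and the inlined helper selects one of two direct calls.
 */
#include <stdbool.h>
#include <stdio.h>

static void compress_generic(const char *blk) { printf("generic(%s)\n", blk); }
static void compress_arch(const char *blk)    { printf("arch(%s)\n", blk); }

/* Both branches are direct calls; contrast with taking a
 * "void (*compress)(const char *)" argument and calling through it. */
static inline void do_update(const char *blk, bool force_generic)
{
	if (force_generic)
		compress_generic(blk);
	else
		compress_arch(blk);
}

int main(void)
{
	do_update("block", false);	/* arch-optimized path */
	do_update("block", true);	/* generic fallback path */
	return 0;
}
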
@@ -13,12 +13,12 @@
 static int crypto_blake2s_update_arm(struct shash_desc *desc,
				     const u8 *in, unsigned int inlen)
 {
-	return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+	return crypto_blake2s_update(desc, in, inlen, false);
 }
 
 static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
 {
-	return crypto_blake2s_final(desc, out, blake2s_compress);
+	return crypto_blake2s_final(desc, out, false);
 }
 
 #define BLAKE2S_ALG(name, driver_name, digest_size)	\
@@ -18,12 +18,12 @@
 static int crypto_blake2s_update_x86(struct shash_desc *desc,
				     const u8 *in, unsigned int inlen)
 {
-	return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+	return crypto_blake2s_update(desc, in, inlen, false);
 }
 
 static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
 {
-	return crypto_blake2s_final(desc, out, blake2s_compress);
+	return crypto_blake2s_final(desc, out, false);
 }
 
 #define BLAKE2S_ALG(name, driver_name, digest_size)	\
@@ -15,12 +15,12 @@
 static int crypto_blake2s_update_generic(struct shash_desc *desc,
					 const u8 *in, unsigned int inlen)
 {
-	return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
+	return crypto_blake2s_update(desc, in, inlen, true);
 }
 
 static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
 {
-	return crypto_blake2s_final(desc, out, blake2s_compress_generic);
+	return crypto_blake2s_final(desc, out, true);
 }
 
 #define BLAKE2S_ALG(name, driver_name, digest_size)	\
@@ -762,7 +762,7 @@ static bool crng_init_try_arch(struct crng_state *crng)
 	return arch_init;
 }
 
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+static bool __init crng_init_try_arch_early(void)
 {
 	int i;
 	bool arch_init = true;
@@ -774,7 +774,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
 			rv = random_get_entropy();
 			arch_init = false;
 		}
-		crng->state[i] ^= rv;
+		primary_crng.state[i] ^= rv;
 	}
 
 	return arch_init;
@@ -788,22 +788,20 @@ static void crng_initialize_secondary(struct crng_state *crng)
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
-static void __init crng_initialize_primary(struct crng_state *crng)
+static void __init crng_initialize_primary(void)
 {
-	_extract_entropy(&crng->state[4], sizeof(u32) * 12);
-	if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
+	_extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
+	if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
 		invalidate_batched_entropy();
 		numa_crng_init();
 		crng_init = 2;
 		pr_notice("crng init done (trusting CPU's manufacturer)\n");
 	}
-	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+	primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
-static void crng_finalize_init(struct crng_state *crng)
+static void crng_finalize_init(void)
 {
-	if (crng != &primary_crng || crng_init >= 2)
-		return;
 	if (!system_wq) {
 		/* We can't call numa_crng_init until we have workqueues,
 		 * so mark this for processing later. */
@@ -814,6 +812,7 @@ static void crng_finalize_init(struct crng_state *crng)
 	invalidate_batched_entropy();
 	numa_crng_init();
 	crng_init = 2;
+	crng_need_final_init = false;
 	process_random_ready_list();
 	wake_up_interruptible(&crng_init_wait);
 	kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -980,7 +979,8 @@ static void crng_reseed(struct crng_state *crng, bool use_input_pool)
 	memzero_explicit(&buf, sizeof(buf));
 	WRITE_ONCE(crng->init_time, jiffies);
 	spin_unlock_irqrestore(&crng->lock, flags);
-	crng_finalize_init(crng);
+	if (crng == &primary_crng && crng_init < 2)
+		crng_finalize_init();
 }
 
 static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
@@ -1697,8 +1697,8 @@ int __init rand_initialize(void)
 {
 	init_std_data();
 	if (crng_need_final_init)
-		crng_finalize_init(&primary_crng);
-	crng_initialize_primary(&primary_crng);
+		crng_finalize_init();
+	crng_initialize_primary();
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
 		urandom_warning.interval = 0;
@@ -1856,7 +1856,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		 */
 		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
-		input_pool.entropy_count = 0;
+		if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+			wake_up_interruptible(&random_write_wait);
+			kill_fasync(&fasync, SIGIO, POLL_OUT);
+		}
 		return 0;
 	case RNDRESEEDCRNG:
 		if (!capable(CAP_SYS_ADMIN))
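
A hedged, userspace-only sketch of the idiom the RNDZAPENTCNT/RNDCLEARPOOL hunk above adopts: atomically swap the counter to zero and notify sleeping writers only if the old value showed there was something to drain. The names and wake-up hook here are invented for illustration; only the xchg-then-wake shape mirrors the kernel change.

/* Illustrative sketch, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int entropy_count = 512;

static void wake_writers(void)
{
	/* stands in for wake_up_interruptible() + kill_fasync(POLL_OUT) */
	puts("pool drained below the wakeup threshold, waking writers");
}

static void zap_entropy_count(void)
{
	/* atomic_exchange returns the old value, like xchg() in the kernel */
	if (atomic_exchange(&entropy_count, 0) > 0)
		wake_writers();
}

int main(void)
{
	zap_entropy_count();	/* first zap: count was 512, writers woken  */
	zap_entropy_count();	/* second zap: already 0, nothing to signal */
	return 0;
}
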
@@ -2205,13 +2208,15 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 			return;
 	}
 
-	/* Suspend writing if we're above the trickle threshold.
+	/* Throttle writing if we're above the trickle threshold.
 	 * We'll be woken up again once below random_write_wakeup_thresh,
-	 * or when the calling thread is about to terminate.
+	 * when the calling thread is about to terminate, or once
+	 * CRNG_RESEED_INTERVAL has lapsed.
 	 */
-	wait_event_interruptible(random_write_wait,
+	wait_event_interruptible_timeout(random_write_wait,
 			!system_wq || kthread_should_stop() ||
-			POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
+			POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
+			CRNG_RESEED_INTERVAL);
 	mix_pool_bytes(buffer, count);
 	credit_entropy_bits(entropy);
 }
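
For readers skimming the diff, the behavioral change in the hunk above is simply "wait for room in the pool, but never longer than CRNG_RESEED_INTERVAL", so hwgenerator input keeps being mixed in even when consumers rarely drain the pool. A rough userspace analogue using pthread_cond_timedwait (illustrative names, simplified error handling, an assumed 1-second interval) might look like:

/* Illustrative sketch, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pool_drained = PTHREAD_COND_INITIALIZER;
static bool below_threshold;	/* would be set by the consumer side */

static void wait_for_room_or_interval(int interval_sec)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += interval_sec;

	pthread_mutex_lock(&lock);
	/* Returns either when signalled (pool drained) or at the deadline,
	 * mirroring wait_event_interruptible_timeout(). */
	while (!below_threshold &&
	       pthread_cond_timedwait(&pool_drained, &lock, &deadline) == 0)
		;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wait_for_room_or_interval(1);	/* nothing signals: returns after ~1s */
	puts("interval elapsed, mixing in fresh hwrng bytes anyway");
	return 0;
}
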
@@ -24,14 +24,11 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state)
 	state->f[0] = -1;
 }
 
-typedef void (*blake2s_compress_t)(struct blake2s_state *state,
-				   const u8 *block, size_t nblocks, u32 inc);
-
 /* Helper functions for BLAKE2s shared by the library and shash APIs */
 
-static inline void __blake2s_update(struct blake2s_state *state,
-				    const u8 *in, size_t inlen,
-				    blake2s_compress_t compress)
+static __always_inline void
+__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
+		 bool force_generic)
 {
 	const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
 
@@ -39,7 +36,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
 		return;
 	if (inlen > fill) {
 		memcpy(state->buf + state->buflen, in, fill);
-		(*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+		if (force_generic)
+			blake2s_compress_generic(state, state->buf, 1,
+						 BLAKE2S_BLOCK_SIZE);
+		else
+			blake2s_compress(state, state->buf, 1,
+					 BLAKE2S_BLOCK_SIZE);
 		state->buflen = 0;
 		in += fill;
 		inlen -= fill;
@@ -47,7 +49,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
 	if (inlen > BLAKE2S_BLOCK_SIZE) {
 		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
 		/* Hash one less (full) block than strictly possible */
-		(*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+		if (force_generic)
+			blake2s_compress_generic(state, in, nblocks - 1,
+						 BLAKE2S_BLOCK_SIZE);
+		else
+			blake2s_compress(state, in, nblocks - 1,
+					 BLAKE2S_BLOCK_SIZE);
 		in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
 		inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
 	}
@@ -55,13 +62,16 @@ static inline void __blake2s_update(struct blake2s_state *state,
 	state->buflen += inlen;
 }
 
-static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
-				   blake2s_compress_t compress)
+static __always_inline void
+__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
 {
 	blake2s_set_lastblock(state);
 	memset(state->buf + state->buflen, 0,
 	       BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
-	(*compress)(state, state->buf, 1, state->buflen);
+	if (force_generic)
+		blake2s_compress_generic(state, state->buf, 1, state->buflen);
+	else
+		blake2s_compress(state, state->buf, 1, state->buflen);
 	cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
 	memcpy(out, state->h, state->outlen);
 }
@@ -99,20 +109,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc)
 
 static inline int crypto_blake2s_update(struct shash_desc *desc,
					const u8 *in, unsigned int inlen,
-					blake2s_compress_t compress)
+					bool force_generic)
 {
 	struct blake2s_state *state = shash_desc_ctx(desc);
 
-	__blake2s_update(state, in, inlen, compress);
+	__blake2s_update(state, in, inlen, force_generic);
 	return 0;
 }
 
 static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
-				       blake2s_compress_t compress)
+				       bool force_generic)
 {
 	struct blake2s_state *state = shash_desc_ctx(desc);
 
-	__blake2s_final(state, out, compress);
+	__blake2s_final(state, out, force_generic);
 	return 0;
 }
 
@@ -18,14 +18,14 @@
 
 void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
 {
-	__blake2s_update(state, in, inlen, blake2s_compress);
+	__blake2s_update(state, in, inlen, false);
 }
 EXPORT_SYMBOL(blake2s_update);
 
 void blake2s_final(struct blake2s_state *state, u8 *out)
 {
 	WARN_ON(IS_ENABLED(DEBUG) && !out);
-	__blake2s_final(state, out, blake2s_compress);
+	__blake2s_final(state, out, false);
 	memzero_explicit(state, sizeof(*state));
 }
 EXPORT_SYMBOL(blake2s_final);