random: get rid of secondary crngs
Author:     Jason A. Donenfeld <Jason@zx2c4.com>
AuthorDate: Sun, 6 Feb 2022 22:51:41 +0000 (23:51 +0100)
Commit:     Jason A. Donenfeld <Jason@zx2c4.com>
CommitDate: Mon, 21 Feb 2022 15:48:06 +0000 (16:48 +0100)
As the comment said, this is indeed a "hack". Since it was introduced,
it's been a constant state machine nightmare, with lots of subtle early
boot issues and a wildly complex set of machinery to keep everything in
sync. Rather than continuing to play whack-a-mole with this approach,
this commit simply removes it entirely. This commit is preparation for
"random: use simpler fast key erasure flow on per-cpu keys" in this
series, which introduces a simpler (and faster) mechanism to accomplish
the same thing.
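
For context, the "fast key erasure" construction that the follow-up
patch is built around expands the current key into one block of ChaCha
output, immediately overwrites the key with the first bytes of that
output, and hands only the remainder to the caller, so the key that
produced a given piece of output no longer exists afterwards. The
sketch below is purely illustrative: the names are invented, the toy
expansion function merely stands in for chacha20_block(), and this is
not the code added later in the series.

  /*
   * Illustrative user-space sketch of fast key erasure. The "PRF"
   * here is a toy xorshift-based expander, NOT ChaCha20 and NOT
   * cryptographically secure; it exists only so the flow compiles
   * and runs.
   */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define KEY_SIZE   32
  #define BLOCK_SIZE 64

  /* Toy stand-in for chacha20_block(): key -> BLOCK_SIZE bytes. */
  static void toy_expand(const uint8_t key[KEY_SIZE], uint8_t out[BLOCK_SIZE])
  {
          uint64_t x = 0x9e3779b97f4a7c15ULL;
          size_t i;

          for (i = 0; i < KEY_SIZE; i++)
                  x = (x ^ key[i]) * 0x100000001b3ULL;
          for (i = 0; i < BLOCK_SIZE; i++) {
                  x ^= x << 13;
                  x ^= x >> 7;
                  x ^= x << 17;
                  out[i] = (uint8_t)x;
          }
  }

  /*
   * Expand the key, overwrite the key with the first KEY_SIZE bytes of
   * the expansion, and return the rest. After this returns, the key
   * that generated "out" is gone.
   */
  static void fast_key_erasure(uint8_t key[KEY_SIZE],
                               uint8_t out[BLOCK_SIZE - KEY_SIZE])
  {
          uint8_t block[BLOCK_SIZE];

          toy_expand(key, block);
          memcpy(key, block, KEY_SIZE);              /* next key */
          memcpy(out, block + KEY_SIZE, BLOCK_SIZE - KEY_SIZE);
          /* A real implementation would use memzero_explicit() here. */
          memset(block, 0, sizeof(block));
  }

  int main(void)
  {
          uint8_t key[KEY_SIZE] = { 1, 2, 3 };       /* pretend: seeded */
          uint8_t out[BLOCK_SIZE - KEY_SIZE];
          size_t i;

          fast_key_erasure(key, out);
          for (i = 0; i < sizeof(out); i++)
                  printf("%02x", out[i]);
          printf("\n");
          return 0;
  }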

Cc: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 41f42d8875138ca9a7f6fbafcbde794585b2215c..be4922e09cc57675bc2671399bf93cc32d057ccc 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -323,14 +323,11 @@ static struct crng_state primary_crng = {
  * its value (from 0->1->2).
  */
 static int crng_init = 0;
-static bool crng_need_final_init = false;
 #define crng_ready() (likely(crng_init > 1))
 static int crng_init_cnt = 0;
-static unsigned long crng_global_init_time = 0;
 #define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
-static void _crng_backtrack_protect(struct crng_state *crng,
-                                   u8 tmp[CHACHA_BLOCK_SIZE], int used);
+static void extract_crng(u8 out[CHACHA_BLOCK_SIZE]);
+static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
 static void _get_random_bytes(void *buf, int nbytes);
 
@@ -365,7 +362,7 @@ static struct {
 
 static void extract_entropy(void *buf, size_t nbytes);
 
-static void crng_reseed(struct crng_state *crng);
+static void crng_reseed(void);
 
 /*
  * This function adds bytes into the entropy "pool".  It does not
@@ -464,7 +461,7 @@ static void credit_entropy_bits(int nbits)
        trace_credit_entropy_bits(nbits, entropy_count, _RET_IP_);
 
        if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
-               crng_reseed(&primary_crng);
+               crng_reseed();
 }
 
 /*********************************************************************
@@ -477,16 +474,7 @@ static void credit_entropy_bits(int nbits)
 
 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 
-/*
- * Hack to deal with crazy userspace progams when they are all trying
- * to access /dev/urandom in parallel.  The programs are almost
- * certainly doing something terribly wrong, but we'll work around
- * their brain damage.
- */
-static struct crng_state **crng_node_pool __read_mostly;
-
 static void invalidate_batched_entropy(void);
-static void numa_crng_init(void);
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
 static int __init parse_trust_cpu(char *arg)
@@ -495,24 +483,6 @@ static int __init parse_trust_cpu(char *arg)
 }
 early_param("random.trust_cpu", parse_trust_cpu);
 
-static bool crng_init_try_arch(struct crng_state *crng)
-{
-       int i;
-       bool arch_init = true;
-       unsigned long rv;
-
-       for (i = 4; i < 16; i++) {
-               if (!arch_get_random_seed_long(&rv) &&
-                   !arch_get_random_long(&rv)) {
-                       rv = random_get_entropy();
-                       arch_init = false;
-               }
-               crng->state[i] ^= rv;
-       }
-
-       return arch_init;
-}
-
 static bool __init crng_init_try_arch_early(void)
 {
        int i;
@@ -531,100 +501,17 @@ static bool __init crng_init_try_arch_early(void)
        return arch_init;
 }
 
-static void crng_initialize_secondary(struct crng_state *crng)
-{
-       chacha_init_consts(crng->state);
-       _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
-       crng_init_try_arch(crng);
-       crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
-
-static void __init crng_initialize_primary(void)
+static void __init crng_initialize(void)
 {
        extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
        if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
                invalidate_batched_entropy();
-               numa_crng_init();
                crng_init = 2;
                pr_notice("crng init done (trusting CPU's manufacturer)\n");
        }
        primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
-static void crng_finalize_init(void)
-{
-       if (!system_wq) {
-               /* We can't call numa_crng_init until we have workqueues,
-                * so mark this for processing later. */
-               crng_need_final_init = true;
-               return;
-       }
-
-       invalidate_batched_entropy();
-       numa_crng_init();
-       crng_init = 2;
-       crng_need_final_init = false;
-       process_random_ready_list();
-       wake_up_interruptible(&crng_init_wait);
-       kill_fasync(&fasync, SIGIO, POLL_IN);
-       pr_notice("crng init done\n");
-       if (unseeded_warning.missed) {
-               pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
-                         unseeded_warning.missed);
-               unseeded_warning.missed = 0;
-       }
-       if (urandom_warning.missed) {
-               pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
-                         urandom_warning.missed);
-               urandom_warning.missed = 0;
-       }
-}
-
-static void do_numa_crng_init(struct work_struct *work)
-{
-       int i;
-       struct crng_state *crng;
-       struct crng_state **pool;
-
-       pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
-       for_each_online_node(i) {
-               crng = kmalloc_node(sizeof(struct crng_state),
-                                   GFP_KERNEL | __GFP_NOFAIL, i);
-               spin_lock_init(&crng->lock);
-               crng_initialize_secondary(crng);
-               pool[i] = crng;
-       }
-       /* pairs with READ_ONCE() in select_crng() */
-       if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
-               for_each_node(i)
-                       kfree(pool[i]);
-               kfree(pool);
-       }
-}
-
-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
-
-static void numa_crng_init(void)
-{
-       if (IS_ENABLED(CONFIG_NUMA))
-               schedule_work(&numa_crng_init_work);
-}
-
-static struct crng_state *select_crng(void)
-{
-       if (IS_ENABLED(CONFIG_NUMA)) {
-               struct crng_state **pool;
-               int nid = numa_node_id();
-
-               /* pairs with cmpxchg_release() in do_numa_crng_init() */
-               pool = READ_ONCE(crng_node_pool);
-               if (pool && pool[nid])
-                       return pool[nid];
-       }
-
-       return &primary_crng;
-}
-
 /*
  * crng_fast_load() can be called by code in the interrupt service
  * path.  So we can't afford to dilly-dally. Returns the number of
@@ -702,68 +589,71 @@ static int crng_slow_load(const u8 *cp, size_t len)
        return 1;
 }
 
-static void crng_reseed(struct crng_state *crng)
+static void crng_reseed(void)
 {
        unsigned long flags;
-       int i;
+       int i, entropy_count;
        union {
                u8 block[CHACHA_BLOCK_SIZE];
                u32 key[8];
        } buf;
 
-       if (crng == &primary_crng) {
-               int entropy_count;
-               do {
-                       entropy_count = READ_ONCE(input_pool.entropy_count);
-                       if (entropy_count < POOL_MIN_BITS)
-                               return;
-               } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
-               extract_entropy(buf.key, sizeof(buf.key));
-               wake_up_interruptible(&random_write_wait);
-               kill_fasync(&fasync, SIGIO, POLL_OUT);
-       } else {
-               _extract_crng(&primary_crng, buf.block);
-               _crng_backtrack_protect(&primary_crng, buf.block,
-                                       CHACHA_KEY_SIZE);
-       }
-       spin_lock_irqsave(&crng->lock, flags);
+       do {
+               entropy_count = READ_ONCE(input_pool.entropy_count);
+               if (entropy_count < POOL_MIN_BITS)
+                       return;
+       } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+       extract_entropy(buf.key, sizeof(buf.key));
+       wake_up_interruptible(&random_write_wait);
+       kill_fasync(&fasync, SIGIO, POLL_OUT);
+
+       spin_lock_irqsave(&primary_crng.lock, flags);
        for (i = 0; i < 8; i++)
-               crng->state[i + 4] ^= buf.key[i];
+               primary_crng.state[i + 4] ^= buf.key[i];
        memzero_explicit(&buf, sizeof(buf));
-       WRITE_ONCE(crng->init_time, jiffies);
-       spin_unlock_irqrestore(&crng->lock, flags);
-       if (crng == &primary_crng && crng_init < 2)
-               crng_finalize_init();
+       WRITE_ONCE(primary_crng.init_time, jiffies);
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
+       if (crng_init < 2) {
+               invalidate_batched_entropy();
+               crng_init = 2;
+               process_random_ready_list();
+               wake_up_interruptible(&crng_init_wait);
+               kill_fasync(&fasync, SIGIO, POLL_IN);
+               pr_notice("crng init done\n");
+               if (unseeded_warning.missed) {
+                       pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+                                 unseeded_warning.missed);
+                       unseeded_warning.missed = 0;
+               }
+               if (urandom_warning.missed) {
+                       pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+                                 urandom_warning.missed);
+                       urandom_warning.missed = 0;
+               }
+       }
 }
 
-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
+static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
 {
        unsigned long flags, init_time;
 
        if (crng_ready()) {
-               init_time = READ_ONCE(crng->init_time);
-               if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
-                   time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
-                       crng_reseed(crng);
+               init_time = READ_ONCE(primary_crng.init_time);
+               if (time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
+                       crng_reseed();
        }
-       spin_lock_irqsave(&crng->lock, flags);
-       chacha20_block(&crng->state[0], out);
-       if (crng->state[12] == 0)
-               crng->state[13]++;
-       spin_unlock_irqrestore(&crng->lock, flags);
-}
-
-static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
-{
-       _extract_crng(select_crng(), out);
+       spin_lock_irqsave(&primary_crng.lock, flags);
+       chacha20_block(&primary_crng.state[0], out);
+       if (primary_crng.state[12] == 0)
+               primary_crng.state[13]++;
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 /*
  * Use the leftover bytes from the CRNG block output (if there is
  * enough) to mutate the CRNG key to provide backtracking protection.
  */
-static void _crng_backtrack_protect(struct crng_state *crng,
-                                   u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
 {
        unsigned long flags;
        u32 *s, *d;
@@ -774,17 +664,12 @@ static void _crng_backtrack_protect(struct crng_state *crng,
                extract_crng(tmp);
                used = 0;
        }
-       spin_lock_irqsave(&crng->lock, flags);
+       spin_lock_irqsave(&primary_crng.lock, flags);
        s = (u32 *)&tmp[used];
-       d = &crng->state[4];
+       d = &primary_crng.state[4];
        for (i = 0; i < 8; i++)
                *d++ ^= *s++;
-       spin_unlock_irqrestore(&crng->lock, flags);
-}
-
-static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
-{
-       _crng_backtrack_protect(select_crng(), tmp, used);
+       spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
@@ -1371,10 +1256,7 @@ static void __init init_std_data(void)
 int __init rand_initialize(void)
 {
        init_std_data();
-       if (crng_need_final_init)
-               crng_finalize_init();
-       crng_initialize_primary();
-       crng_global_init_time = jiffies;
+       crng_initialize();
        if (ratelimit_disable) {
                urandom_warning.interval = 0;
                unseeded_warning.interval = 0;
@@ -1544,8 +1426,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                        return -EPERM;
                if (crng_init < 2)
                        return -ENODATA;
-               crng_reseed(&primary_crng);
-               WRITE_ONCE(crng_global_init_time, jiffies - 1);
+               crng_reseed();
                return 0;
        default:
                return -EINVAL;