}
 EXPORT_SYMBOL(get_random_u32);
 
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int random_prepare_cpu(unsigned int cpu)
+{
+       /*
+        * When the cpu comes back online, immediately invalidate both
+        * the per-cpu crng and all batches, so that we serve fresh
+        * randomness.
+        */
+       per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+       per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+       per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+       return 0;
+}
+#endif
+
 /**
  * randomize_page - Generate a random, page-aligned address
  * @start:     The smallest acceptable address the caller will take.
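
The sentinels written by random_prepare_cpu() above work because every
fast-path consumer re-validates before serving bytes. A minimal sketch of
the two consumer-side checks, approximating the shape of crng_make_state()
and the batched get_random_u32()/get_random_u64() in random.c (locking
elided; the exact field names and bounds test are assumptions):

	/* Per-cpu crng: a generation of ULONG_MAX can never match
	 * base_crng.generation, so the first caller reseeds. */
	crng = raw_cpu_ptr(&crngs);
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		/* ... pull a fresh key from base_crng ... */
		crng->generation = base_crng.generation;
	}

	/* Batches: a position of UINT_MAX always fails the bounds
	 * check, so the batch is refilled before anything is served. */
	batch = raw_cpu_ptr(&batched_entropy_u64);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64)) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];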
        };
        struct work_struct mix;
        unsigned long last;
-       atomic_t count;
+       unsigned int count;
        u16 reg_idx;
 };
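
With atomic_t dropped, ->count still packs two things into one word: the
top bit is the work-pending marker and the low bits tally accumulated
irqs. MIX_INFLIGHT is defined earlier in random.c; the helper below is
purely hypothetical, to illustrate the layout:

	#define MIX_INFLIGHT	(1U << 31)

	/* hypothetical helper, for illustration only */
	static inline unsigned int fast_pool_irq_count(const struct fast_pool *f)
	{
		return f->count & ~MIX_INFLIGHT;	/* low 31 bits: irq tally */
	}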
 
 static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
 
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+int random_online_cpu(unsigned int cpu)
+{
+       /*
+        * During CPU shutdown and before CPU onlining,
+        * add_interrupt_randomness() may schedule
+        * mix_interrupt_randomness() and set the MIX_INFLIGHT flag.
+        * However, because the worker can be scheduled on a different
+        * CPU during this period, that flag will never be cleared. For
+        * that reason, we zero out the flag here, which runs just after
+        * workqueues are onlined for the CPU again. This also has the
+        * effect of setting the irq randomness count to zero so that
+        * newly accumulated irqs are counted afresh.
+        */
+       per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+       return 0;
+}
+#endif
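
Spelled out as a timeline, the stale-flag window that comment describes
looks roughly like this (CPU numbers are illustrative):

	/*
	 *  CPU 2 (going down)              unbound worker (e.g. on CPU 3)
	 *  ------------------              ------------------------------
	 *  add_interrupt_randomness():
	 *    count |= MIX_INFLIGHT;
	 *    queue_work_on(2, ...);
	 *  <CPU 2 goes offline; the
	 *   pending work is unbound>
	 *                                  mix_interrupt_randomness():
	 *                                    fast_pool != this_cpu_ptr()
	 *                                    -> return, flag still set
	 *  <CPU 2 comes back up>
	 *  random_online_cpu(2):
	 *    count = 0;   <- flag cleared, irq tally restarts
	 */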
+
 static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
        u32 *ptr = (u32 *)regs;
        local_irq_disable();
        if (fast_pool != this_cpu_ptr(&irq_randomness)) {
                local_irq_enable();
-               /*
-                * If we are unlucky enough to have been moved to another CPU,
-                * during CPU hotplug while the CPU was shutdown then we set
-                * our count to zero atomically so that when the CPU comes
-                * back online, it can enqueue work again. The _release here
-                * pairs with the atomic_inc_return_acquire in
-                * add_interrupt_randomness().
-                */
-               atomic_set_release(&fast_pool->count, 0);
                return;
        }
 
         * consistent view, before we reenable irqs again.
         */
        memcpy(pool, fast_pool->pool32, sizeof(pool));
-       atomic_set(&fast_pool->count, 0);
+       fast_pool->count = 0;
        fast_pool->last = jiffies;
        local_irq_enable();
 
        }
 
        fast_mix(fast_pool->pool32);
-       /* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
-       new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);
+       new_count = ++fast_pool->count;
 
        if (unlikely(crng_init == 0)) {
                if (new_count >= 64 &&
                    crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32),
                                         true, true) > 0) {
-                       atomic_set(&fast_pool->count, 0);
+                       fast_pool->count = 0;
                        fast_pool->last = now;
                        if (spin_trylock(&input_pool.lock)) {
                                _mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
 
        if (unlikely(!fast_pool->mix.func))
                INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
-       atomic_or(MIX_INFLIGHT, &fast_pool->count);
+       fast_pool->count |= MIX_INFLIGHT;
        queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
 }
 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
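
A note on why the plain ++/|=/= on ->count here and in
mix_interrupt_randomness() above no longer need atomics. The assumed
serialization argument, in brief:

	/*
	 * fast_pool->count is only ever accessed on the pool's own CPU:
	 *
	 *   add_interrupt_randomness()   hard-irq context, local CPU only
	 *   mix_interrupt_randomness()   local_irq_disable() plus the
	 *                                same-CPU check shown earlier
	 *   random_prepare_cpu() /
	 *   random_online_cpu()          hotplug transitions, where a
	 *                                racing early irq at worst restarts
	 *                                the tally, which is harmless
	 *
	 * so plain, non-atomic loads and stores suffice.
	 */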
 
 #include <linux/scs.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/cpuset.h>
+#include <linux/random.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
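
The new include is what gives cpu.c the prototypes for the two hotplug
callbacks; presumably <linux/random.h> carries declarations along these
lines, mirroring the CONFIG_SMP guard around the definitions:

	#ifdef CONFIG_SMP
	int random_prepare_cpu(unsigned int cpu);
	int random_online_cpu(unsigned int cpu);
	#endif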
                .startup.single         = perf_event_init_cpu,
                .teardown.single        = perf_event_exit_cpu,
        },
+       [CPUHP_RANDOM_PREPARE] = {
+               .name                   = "random:prepare",
+               .startup.single         = random_prepare_cpu,
+               .teardown.single        = NULL,
+       },
        [CPUHP_WORKQUEUE_PREP] = {
                .name                   = "workqueue:prepare",
                .startup.single         = workqueue_prepare_cpu,
                .startup.single         = workqueue_online_cpu,
                .teardown.single        = workqueue_offline_cpu,
        },
+       [CPUHP_AP_RANDOM_ONLINE] = {
+               .name                   = "random:online",
+               .startup.single         = random_online_cpu,
+               .teardown.single        = NULL,
+       },
        [CPUHP_AP_RCUTREE_ONLINE] = {
                .name                   = "RCU/tree:online",
                .startup.single         = rcutree_online_cpu,
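
For these table slots to exist, enum cpuhp_state in
include/linux/cpuhotplug.h has to grow matching entries. A sketch of the
expected placement (neighboring states abridged), keeping the PREPARE
entry before CPUHP_WORKQUEUE_PREP and the AP entry just after
CPUHP_AP_WORKQUEUE_ONLINE, as the comments in random.c require:

	enum cpuhp_state {
		/* ... */
		CPUHP_RANDOM_PREPARE,
		CPUHP_WORKQUEUE_PREP,
		/* ... */
		CPUHP_AP_WORKQUEUE_ONLINE,
		CPUHP_AP_RANDOM_ONLINE,
		/* ... */
	};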