{
        int cpu = smp_processor_id();
 
-       if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+       if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK))
                return false;
 
        if (idle_cpu(cpu) && !need_resched())
                return true;
 
        /*
         * We can't run Idle Load Balance on this CPU for this time so we
         * cancel it and clear NOHZ_BALANCE_KICK
         */
-       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+       atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu));
        return false;
 }
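
The conversion pattern shows up here first: test_bit() on the open-coded flags word becomes atomic_read() masked against the flag, and clear_bit() becomes atomic_andnot(). For readers outside the kernel tree, the same shape in portable C11 <stdatomic.h> (a minimal sketch; all names are illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BALANCE_KICK (1U << 1)          /* stand-in for NOHZ_BALANCE_KICK */

    static atomic_uint flags_demo;

    static bool got_kick_demo(bool can_run_ilb)
    {
            /* plain load + mask, like atomic_read() & NOHZ_BALANCE_KICK */
            if (!(atomic_load(&flags_demo) & BALANCE_KICK))
                    return false;

            if (can_run_ilb)
                    return true;

            /* can't service the kick now: drop the flag, like atomic_andnot() */
            atomic_fetch_and(&flags_demo, ~BALANCE_KICK);
            return false;
    }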
 
                rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
                rq->last_load_update_tick = jiffies;
-               rq->nohz_flags = 0;
+               atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
                hrtick_rq_init(rq);
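
One detail worth calling out in the init hunk: atomic_t must go through atomic_set() for runtime initialization, since plain assignment to the wrapped type doesn't compile. The static-initializer spelling of the same thing is ATOMIC_INIT():

    atomic_t nohz_flags = ATOMIC_INIT(0);   /* static-init form of the atomic_set() above */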
 
  */
 static void nohz_balancer_kick(void)
 {
+       unsigned int flags;
        int ilb_cpu;
 
        nohz.next_balance++;
 
        ilb_cpu = find_new_ilb();
 
        if (ilb_cpu >= nr_cpu_ids)
                return;
 
-       if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+       flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu));
+       if (flags & NOHZ_BALANCE_KICK)
                return;
        /*
         * Use smp_send_reschedule() instead of resched_cpu().
         * This way we generate a sched IPI on the target CPU which
         * is idle. And the softirq performing nohz idle load balance
         * will be run before returning from the IPI.
         */
        smp_send_reschedule(ilb_cpu);
 }
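
atomic_fetch_or() is the atomic_t counterpart of test_and_set_bit(): it sets the flag and returns the previous value in one atomic step, so exactly one caller observes the 0->1 transition and sends the IPI; concurrent kickers see the flag already set and back off. A minimal C11 sketch of that first-caller-wins gate (names are illustrative):

    #include <stdatomic.h>

    #define BALANCE_KICK (1U << 1)

    static atomic_uint target_flags;

    static void kick_once_demo(void (*send_ipi)(void))
    {
            unsigned int old = atomic_fetch_or(&target_flags, BALANCE_KICK);

            if (old & BALANCE_KICK)
                    return;         /* someone already kicked; wake-up in flight */

            send_ipi();             /* only the 0->1 winner gets here */
    }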
 
 void nohz_balance_exit_idle(unsigned int cpu)
 {
-       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+       unsigned int flags = atomic_read(nohz_flags(cpu));
+
+       if (unlikely(flags & NOHZ_TICK_STOPPED)) {
                /*
                 * Completely isolated CPUs don't ever set, so we must test.
                 */
                if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
                        cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
                        atomic_dec(&nohz.nr_cpus);
                }
-               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+               atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
        }
 }
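
The rewrite also changes the access pattern slightly: nohz_flags(cpu) is read once into a local snapshot and the tests run against that, so a single load can answer several flag queries if more flags are added later; the only write left is the atomic_andnot() dropping NOHZ_TICK_STOPPED.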
 
        if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
                return;
 
-       if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
+       if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
                return;
 
        /* If we're a completely isolated CPU, we don't play: */
        if (on_null_domain(cpu_rq(cpu)))
                return;
 
        cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
        atomic_inc(&nohz.nr_cpus);
-       set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+       atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 #endif
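
Enter and exit are now a symmetric atomic_or()/atomic_andnot() pair on the same word. The ordering semantics carry over unchanged: like set_bit()/clear_bit(), the non-value-returning atomic_or() and atomic_andnot() imply no memory barriers; the only fully ordered operation in this patch is the value-returning atomic_fetch_or() in nohz_balancer_kick(), which matches the ordering test_and_set_bit() used to provide.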
 
        unsigned long next_balance = jiffies + 60*HZ;
        int update_next_balance = 0;
 
-       if (idle != CPU_IDLE ||
-           !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+       if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK))
+               return;
+
+       if (idle != CPU_IDLE)
                goto end;
 
        for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
        if (likely(update_next_balance))
                nohz.next_balance = next_balance;
 end:
-       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
+       atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
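
A subtle improvement over the bitmap version: the old code took the goto end path, and so ran clear_bit(), both when the kick flag was never set and when idle != CPU_IDLE. The split above returns early without touching the flags word when NOHZ_BALANCE_KICK isn't set, and only a genuine kick that can't be serviced right now falls through to end, where the flag is cleared.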
 
 /*
 
 #ifdef CONFIG_SMP
        unsigned long           last_load_update_tick;
 #endif /* CONFIG_SMP */
-       unsigned long           nohz_flags;
+       atomic_t                nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
        /* capture load from *all* tasks on this CPU: */
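
This type change is what drives every hunk above: test_bit()/set_bit()/clear_bit() operate on an unsigned long bitmap, while atomic_or()/atomic_andnot()/atomic_fetch_or() take an atomic_t (an int). With only two flags in use the narrower type loses nothing, and the flag definitions move from bit numbers to masks, as the header change below shows.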
 extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
-enum rq_nohz_flag_bits {
-       NOHZ_TICK_STOPPED,
-       NOHZ_BALANCE_KICK,
-};
+#define NOHZ_TICK_STOPPED_BIT  0
+#define NOHZ_BALANCE_KICK_BIT  1
+
+#define NOHZ_TICK_STOPPED      BIT(NOHZ_TICK_STOPPED_BIT)
+#define NOHZ_BALANCE_KICK      BIT(NOHZ_BALANCE_KICK_BIT)
 
 #define nohz_flags(cpu)        (&cpu_rq(cpu)->nohz_flags)
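
With BIT(), the flag names are now masks (NOHZ_TICK_STOPPED is 0x1, NOHZ_BALANCE_KICK is 0x2) rather than bit indices, which is what lets callers test them with plain bitwise AND against one atomic_read(). A sketch of what that buys if several flags ever need checking together (a hypothetical combined test, not in this patch):

    unsigned int flags = atomic_read(nohz_flags(cpu));

    if (flags & (NOHZ_TICK_STOPPED | NOHZ_BALANCE_KICK)) {
            /* one load answers both questions; with bit numbers this
             * would take two test_bit() calls */
    }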