return;
        }
 
-       if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-               cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-
-               smp_mb();
-               /*
-                * Use smp_send_reschedule() instead of resched_cpu().
-                * This way we generate a sched IPI on the target cpu which
-                * is idle. And the softirq performing nohz idle load balance
-                * will be run before returning from the IPI.
-                */
-               smp_send_reschedule(ilb_cpu);
-       }
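+       /*
+        * If the kick is already pending, ilb_cpu has been (or will be)
+        * IPI'd by another cpu and the nohz balance will run there.
+        */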
+       if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+               return;
+       /*
+        * Use smp_send_reschedule() instead of resched_cpu().
+        * This way we generate a sched IPI on the target cpu which
+        * is idle. And the softirq performing nohz idle load balance
+        * will be run before returning from the IPI.
+        */
+       smp_send_reschedule(ilb_cpu);
        return;
 }
 
                        }
                        return;
                }
+
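+               /*
+                * Record that this cpu has stopped its tick; the flag is
+                * noticed and cleared in nohz_kick_needed() on the first
+                * busy tick.
+                */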
+               set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
        } else {
                if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
                        return;
        struct rq *rq;
        int balance_cpu;
 
-       if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
-               return;
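+       /*
+        * Bail out via 'end' so that NOHZ_BALANCE_KICK is cleared even
+        * when no balancing is done.
+        */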
+       if (idle != CPU_IDLE ||
+           !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+               goto end;
 
        for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
                if (balance_cpu == this_cpu)
                 * work being done for other cpus. Next load
                 * balancing owner will pick it up.
                 */
-               if (need_resched()) {
-                       this_rq->nohz_balance_kick = 0;
+               if (need_resched())
                        break;
-               }
 
                raw_spin_lock_irq(&this_rq->lock);
                update_rq_clock(this_rq);
                        this_rq->next_balance = rq->next_balance;
        }
        nohz.next_balance = this_rq->next_balance;
-       this_rq->nohz_balance_kick = 0;
+end:
+       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
        int ret;
        int first_pick_cpu, second_pick_cpu;
 
-       if (time_before(now, nohz.next_balance))
+       if (unlikely(idle_cpu(cpu)))
                return 0;
 
-       if (idle_cpu(cpu))
+       /*
+        * We may recently have been in ticked or tickless idle mode. At the
+        * first busy tick after returning from idle, we will update the busy
+        * stats.
+        */
+       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))))
+               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+       if (time_before(now, nohz.next_balance))
                return 0;
 
        first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
            likely(!on_null_domain(cpu)))
                raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ
-       else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
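+       /*
+        * Checked independently of the softirq above: a busy cpu may also
+        * need to kick an idle cpu to run the nohz idle load balance.
+        */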
+       if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
                nohz_balancer_kick(cpu);
 #endif
 }