#define LBF_NEED_BREAK 0x02
 #define LBF_DST_PINNED  0x04
 #define LBF_SOME_PINNED        0x08
-#define LBF_NOHZ_STATS 0x10
-#define LBF_NOHZ_AGAIN 0x20
 
 struct lb_env {
        struct sched_domain     *sd;
        for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                struct rq *rq = cpu_rq(i);
 
-               if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
-                       env->flags |= LBF_NOHZ_AGAIN;
-
                sgs->group_load += cpu_load(rq);
                sgs->group_util += cpu_util(i);
                sgs->group_runnable += cpu_runnable(rq);
        struct sg_lb_stats tmp_sgs;
        int sg_status = 0;
 
-#ifdef CONFIG_NO_HZ_COMMON
-       if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
-               env->flags |= LBF_NOHZ_STATS;
-#endif
-
        do {
                struct sg_lb_stats *sgs = &tmp_sgs;
                int local_group;
        /* Tag domain that child domain prefers tasks go to siblings first */
        sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
 
-#ifdef CONFIG_NO_HZ_COMMON
-       if ((env->flags & LBF_NOHZ_AGAIN) &&
-           cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
-
-               WRITE_ONCE(nohz.next_blocked,
-                          jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
-       }
-#endif
 
        if (env->sd->flags & SD_NUMA)
                env->fbq_type = fbq_classify_group(&sds->busiest_stat);
            time_before(jiffies, READ_ONCE(nohz.next_blocked)))
                return;
 
-       raw_spin_unlock(&this_rq->lock);
        /*
-        * This CPU is going to be idle and blocked load of idle CPUs
-        * need to be updated. Run the ilb locally as it is a good
-        * candidate for ilb instead of waking up another idle CPU.
-        * Kick an normal ilb if we failed to do the update.
+        * Blocked load of idle CPUs needs to be updated.
+        * Kick an ILB to update statistics.
         */
-       if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
-               kick_ilb(NOHZ_STATS_KICK);
-       raw_spin_lock(&this_rq->lock);
+       kick_ilb(NOHZ_STATS_KICK);
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
                        update_next_balance(sd, &next_balance);
                rcu_read_unlock();
 
-               nohz_newidle_balance(this_rq);
-
                goto out;
        }
 
 
        if (pulled_task)
                this_rq->idle_stamp = 0;
+       else
+               nohz_newidle_balance(this_rq);
 
        rq_repin_lock(this_rq, rf);