* Scheduling class queueing methods:
  */
 
+#ifdef CONFIG_NUMA
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long
+adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
+{
+       /*
+        * Allow a NUMA imbalance if the number of busy CPUs is below the
+        * maximum threshold. Above this threshold, individual tasks may be
+        * contending for both memory bandwidth and any shared HT resources.
+        * This is an approximation as the number of running tasks may not be
+        * related to the number of busy CPUs due to sched_setaffinity.
+        */
+       if (dst_running > imb_numa_nr)
+               return imbalance;
+
+       /*
+        * Allow a small imbalance based on a simple pair of communicating
+        * tasks that remain local when the destination is lightly loaded.
+        */
+       if (imbalance <= NUMA_IMBALANCE_MIN)
+               return 0;
+
+       return imbalance;
+}
+#endif /* CONFIG_NUMA */
+
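
For reference, a minimal userspace sketch (not part of the patch) that mirrors the helper's logic with hypothetical inputs, assuming imb_numa_nr == 4. It shows the two return values callers rely on: 0 meaning "the imbalance is small enough to ignore", and the unmodified imbalance otherwise.

/* Standalone sketch mirroring adjust_numa_imbalance(); not kernel code. */
#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2

static long adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
{
	if (dst_running > imb_numa_nr)
		return imbalance;
	if (imbalance <= NUMA_IMBALANCE_MIN)
		return 0;
	return imbalance;
}

int main(void)
{
	/* Busy destination (8 running > imb_numa_nr of 4): report the real imbalance. */
	printf("%ld\n", adjust_numa_imbalance(3, 8, 4));	/* 3 */
	/* Lightly loaded destination, small imbalance: ignored. */
	printf("%ld\n", adjust_numa_imbalance(2, 3, 4));	/* 0 */
	/* Lightly loaded destination, large imbalance: still reported. */
	printf("%ld\n", adjust_numa_imbalance(5, 3, 4));	/* 5 */
	return 0;
}
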
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Approximate time to scan a full NUMA task in ms. The task scan period is
 
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
-static inline long adjust_numa_imbalance(int imbalance,
-                                       int dst_running, int imb_numa_nr);
 
 static inline enum
 numa_type numa_classify(unsigned int imbalance_pct,
        return true;
 }
 
-/*
- * Allow a NUMA imbalance if busy CPUs is less than 25% of the domain.
- * This is an approximation as the number of running tasks may not be
- * related to the number of busy CPUs due to sched_setaffinity.
- */
-static inline bool allow_numa_imbalance(int running, int imb_numa_nr)
-{
-       return running <= imb_numa_nr;
-}
-
 /*
  * find_idlest_group() finds and returns the least busy CPU group within the
  * domain.
                break;
 
        case group_has_spare:
+#ifdef CONFIG_NUMA
                if (sd->flags & SD_NUMA) {
 #ifdef CONFIG_NUMA_BALANCING
                        int idlest_cpu;
                        idlest_cpu = cpumask_first(sched_group_span(idlest));
                        if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
                                return idlest;
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
                        /*
                         * Otherwise, keep the task close to the wakeup source
                         * and improve locality if the number of running tasks
                         * would remain below the threshold where an imbalance
                         * is allowed. If there is a real need for migration,
                         * periodic load balance will take care of it.
                         */
-                       if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, sd->imb_numa_nr))
+                       imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
+                       if (!adjust_numa_imbalance(imbalance,
+                                                  local_sgs.sum_nr_running + 1,
+                                                  sd->imb_numa_nr)) {
                                return NULL;
+                       }
                }
+#endif /* CONFIG_NUMA */
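
A hypothetical worked example of the gate above (not part of the patch): suppose the local group has 6 idle CPUs and 1 running task, the idlest remote group has 7 idle CPUs, and sd->imb_numa_nr is 4.

/* Sketch of the decision, reusing the standalone helper from the earlier
 * sketch; all numbers are hypothetical, not taken from the patch.
 */
static int stays_local_example(void)
{
	int local_idle = 6, idlest_idle = 7;
	int local_running = 1, imb_numa_nr = 4;
	int imbalance = local_idle > idlest_idle ?
			local_idle - idlest_idle : idlest_idle - local_idle;	/* 1 */

	/*
	 * With the waking task counted (1 + 1 = 2 running), the destination
	 * stays below imb_numa_nr and the idle-CPU gap is within
	 * NUMA_IMBALANCE_MIN, so the helper returns 0 and find_idlest_group()
	 * would return NULL: the task stays near its wakeup source.
	 */
	return adjust_numa_imbalance(imbalance, local_running + 1, imb_numa_nr) == 0;
}
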
 
                /*
                 * Select group with highest number of idle CPUs. We could also
        }
 }
 
-#define NUMA_IMBALANCE_MIN 2
-
-static inline long adjust_numa_imbalance(int imbalance,
-                               int dst_running, int imb_numa_nr)
-{
-       if (!allow_numa_imbalance(dst_running, imb_numa_nr))
-               return imbalance;
-
-       /*
-        * Allow a small imbalance based on a simple pair of communicating
-        * tasks that remain local when the destination is lightly loaded.
-        */
-       if (imbalance <= NUMA_IMBALANCE_MIN)
-               return 0;
-
-       return imbalance;
-}
-
 /**
  * calculate_imbalance - Calculate the amount of imbalance present within the
  *                      groups of a given sched_domain during load balance.
                         */
                        env->migration_type = migrate_task;
                        lsub_positive(&nr_diff, local->sum_nr_running);
-                       env->imbalance = nr_diff >> 1;
+                       env->imbalance = nr_diff;
                } else {
 
                        /*
                         * idle cpus.
                         */
                        env->migration_type = migrate_task;
-                       env->imbalance = max_t(long, 0, (local->idle_cpus -
-                                                busiest->idle_cpus) >> 1);
+                       env->imbalance = max_t(long, 0,
+                                              (local->idle_cpus - busiest->idle_cpus));
                }
 
+#ifdef CONFIG_NUMA
                /* Consider allowing a small imbalance between NUMA groups */
                if (env->sd->flags & SD_NUMA) {
                        env->imbalance = adjust_numa_imbalance(env->imbalance,
-                               local->sum_nr_running + 1, env->sd->imb_numa_nr);
+                                                              local->sum_nr_running + 1,
+                                                              env->sd->imb_numa_nr);
                }
+#endif
+
+               /* Number of tasks to move to restore balance */
+               env->imbalance >>= 1;
 
                return;
        }
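
To illustrate why the halving now happens after the NUMA adjustment (again a hypothetical sketch, not part of the patch): with 4 idle CPUs locally, 0 in the busiest group, 2 tasks running locally and sd->imb_numa_nr == 4, the old ordering halved first and then discarded the result as a small imbalance, whereas the new ordering applies the threshold to the raw idle-CPU difference and still moves tasks.

/* Sketch contrasting the two orderings; reuses the helper from the first
 * sketch, all numbers hypothetical.
 */
static void imbalance_ordering_example(void)
{
	int local_idle = 4, busiest_idle = 0;
	int dst_running = 2 + 1, imb_numa_nr = 4;
	long imbalance;

	/*
	 * Old ordering: (4 - 0) >> 1 == 2, which is <= NUMA_IMBALANCE_MIN,
	 * so adjust_numa_imbalance() returned 0 and nothing was moved.
	 */

	/* New ordering: adjust on the raw difference, halve last. */
	imbalance = local_idle - busiest_idle;				/* 4 */
	imbalance = adjust_numa_imbalance(imbalance, dst_running,
					  imb_numa_nr);			/* 4 > MIN, kept */
	imbalance >>= 1;						/* move 2 tasks */
	(void)imbalance;
}
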