#endif /* CONFIG_SMP */
 
 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
-static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
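Note: this forward declaration carries the whole API change: dequeue_top_rt_rq() now takes an explicit count of tasks to subtract from rq->nr_running instead of reading rt_rq->rt_nr_running itself. The remaining hunks update its callers and its body.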
@@ ... @@
        rt_se = rt_rq->tg->rt_se[cpu];
 
        if (!rt_se) {
-               dequeue_top_rt_rq(rt_rq);
+               dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
                /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
                cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
        }
 
@@ ... @@
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-       dequeue_top_rt_rq(rt_rq);
+       dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
 }
 
 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
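Note: both sched_rt_rq_dequeue() variants above pass the live rt_nr_running, so these call sites behave exactly as before; only dequeue_rt_stack() further down passes something different.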
@@ ... @@
 }
 
 static void
-dequeue_top_rt_rq(struct rt_rq *rt_rq)
+dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
 {
        struct rq *rq = rq_of_rt_rq(rt_rq);

        BUG_ON(&rq->rt != rt_rq);

        if (!rt_rq->rt_queued)
                return;

        BUG_ON(!rq->nr_running);
 
-       sub_nr_running(rq, rt_rq->rt_nr_running);
+       sub_nr_running(rq, count);
        rt_rq->rt_queued = 0;
 }
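Note: the body now subtracts the caller-supplied count. Whether that equals rt_rq->rt_nr_running at the time of the call is up to the caller, which is the point of the change.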
@@ ... @@
 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
 {
        struct sched_rt_entity *back = NULL;
+       unsigned int rt_nr_running;
 
        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }
 
-       dequeue_top_rt_rq(rt_rq_of_se(back));
+       rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
 
        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se, flags);
        }
+
+       dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
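
The dequeue_rt_stack() hunk is where the new parameter matters: rt_nr_running is sampled before the __dequeue_rt_entity() loop, which itself updates the rt_rq counters, and the snapshot is subtracted from rq->nr_running only after the stack has been torn down. That way sub_nr_running() removes the same number of tasks that enqueue_top_rt_rq() once added, rather than whatever the counter holds mid-teardown. The toy program below is a minimal sketch of this snapshot-then-subtract pattern; all toy_* names are invented for illustration and nothing in it is kernel API.

/* Toy model of the snapshot-then-subtract pattern; not kernel code. */
#include <assert.h>
#include <stdio.h>

struct toy_rt_rq { unsigned int rt_nr_running; };
struct toy_rq    { unsigned int nr_running; struct toy_rt_rq rt; };

/* Stand-in for sub_nr_running(): drop 'count' tasks from the rq total. */
static void toy_sub_nr_running(struct toy_rq *rq, unsigned int count)
{
	assert(rq->nr_running >= count);  /* analogue of BUG_ON(!rq->nr_running) */
	rq->nr_running -= count;
}

/* Stand-in for __dequeue_rt_entity()'s side effect on the counters. */
static void toy_dequeue_entity(struct toy_rt_rq *rt_rq)
{
	rt_rq->rt_nr_running--;
}

int main(void)
{
	struct toy_rq rq = { .nr_running = 3, .rt = { .rt_nr_running = 3 } };

	/* Snapshot first, as the patched dequeue_rt_stack() does ... */
	unsigned int snapshot = rq.rt.rt_nr_running;

	/* ... then tear down the entities, which mutates the counter ... */
	toy_dequeue_entity(&rq.rt);
	toy_dequeue_entity(&rq.rt);
	toy_dequeue_entity(&rq.rt);

	/*
	 * ... and subtract the snapshot, balancing the original enqueue.
	 * Using the live counter here would subtract 0 and leave three
	 * phantom tasks in rq->nr_running.
	 */
	toy_sub_nr_running(&rq, snapshot);

	printf("nr_running = %u\n", rq.nr_running);  /* prints 0 */
	return 0;
}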