rq_list_add(data->cached_rq, rq);
                nr++;
        }
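+       /*
+        * These are driver tags (no scheduler), so account the whole batch
+        * as active now; sched-tag requests are accounted later, when the
+        * driver tag is actually allocated.
+        */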
+       if (!(data->rq_flags & RQF_SCHED_TAGS))
+               blk_mq_add_active_requests(data->hctx, nr);
        /* caller already holds a reference, add for remainder */
        percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
        data->nr_tags -= nr;
                goto retry;
        }
 
+       /* Without RQF_SCHED_TAGS the tag is a driver tag; account it now. */
+       if (!(data->rq_flags & RQF_SCHED_TAGS))
+               blk_mq_inc_active_requests(data->hctx);
        rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
        blk_mq_rq_time_init(rq, alloc_time_ns);
        return rq;
        tag = blk_mq_get_tag(&data);
        if (tag == BLK_MQ_NO_TAG)
                goto out_queue_exit;
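+       /* No scheduler tags here, so this is a driver tag; account it. */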
+       if (!(data.rq_flags & RQF_SCHED_TAGS))
+               blk_mq_inc_active_requests(data.hctx);
        rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
        blk_mq_rq_time_init(rq, alloc_time_ns);
        rq->__data_len = 0;
        blk_pm_mark_last_busy(rq);
        rq->mq_hctx = NULL;
 
-       if (rq->rq_flags & RQF_MQ_INFLIGHT)
-               __blk_mq_dec_active_requests(hctx);
-
-       if (rq->tag != BLK_MQ_NO_TAG)
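+       /* Only requests that got a driver tag were accounted as active. */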
+       if (rq->tag != BLK_MQ_NO_TAG) {
+               blk_mq_dec_active_requests(hctx);
                blk_mq_put_tag(hctx->tags, ctx, rq->tag);
+       }
        if (sched_tag != BLK_MQ_NO_TAG)
                blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
-        * All requests should have been marked as RQF_MQ_INFLIGHT, so
-        * update hctx->nr_active in batch
+        * All of these requests hold driver tags and were accounted as
+        * active when those tags were allocated, so drop the accounting
+        * for the whole batch here.
         */
-       if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-               __blk_mq_sub_active_requests(hctx, nr_tags);
+       blk_mq_sub_active_requests(hctx, nr_tags);
 
        blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
        percpu_ref_put_many(&q->q_usage_counter, nr_tags);
        return data.rq;
 }
 
-static bool __blk_mq_alloc_driver_tag(struct request *rq)
+bool __blk_mq_alloc_driver_tag(struct request *rq)
 {
        struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
        unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
                return false;
 
        rq->tag = tag + tag_offset;
-       return true;
-}
-
-bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
-{
-       if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
-               return false;
-
-       if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
-                       !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
-               rq->rq_flags |= RQF_MQ_INFLIGHT;
-               __blk_mq_inc_active_requests(hctx);
-       }
-       hctx->tags->rqs[rq->tag] = rq;
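+       /*
+        * Account the request as active here, where the driver tag is
+        * actually allocated, rather than in blk_mq_get_driver_tag().
+        */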
+       blk_mq_inc_active_requests(rq->mq_hctx);
        return true;
 }
 
 
        return -1;
 }
 
-static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
+static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
+                                               int val)
 {
        if (blk_mq_is_shared_tags(hctx->flags))
-               atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
+               atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
        else
-               atomic_inc(&hctx->nr_active);
+               atomic_add(val, &hctx->nr_active);
+}
+
+static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+       __blk_mq_add_active_requests(hctx, 1);
 }
 
 static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
        __blk_mq_sub_active_requests(hctx, 1);
 }
 
+/*
+ * These wrappers touch the active-request counters only when the queue
+ * shares tags (BLK_MQ_F_TAG_QUEUE_SHARED), so callers no longer need to
+ * check the flag themselves.
+ */
+static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
+                                             int val)
+{
+       if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+               __blk_mq_add_active_requests(hctx, val);
+}
+
+static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+       if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+               __blk_mq_inc_active_requests(hctx);
+}
+
+static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
+                                             int val)
+{
+       if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+               __blk_mq_sub_active_requests(hctx, val);
+}
+
+static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+       if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+               __blk_mq_dec_active_requests(hctx);
+}
+
 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
 {
        if (blk_mq_is_shared_tags(hctx->flags))
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
 {
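+       /*
+        * blk_mq_dec_active_requests() is a no-op unless the queue shares
+        * tags, so no RQF_MQ_INFLIGHT bookkeeping is needed here anymore.
+        */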
+       blk_mq_dec_active_requests(hctx);
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;
-
-       if (rq->rq_flags & RQF_MQ_INFLIGHT) {
-               rq->rq_flags &= ~RQF_MQ_INFLIGHT;
-               __blk_mq_dec_active_requests(hctx);
-       }
 }
 
 static inline void blk_mq_put_driver_tag(struct request *rq)
        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
-bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
+bool __blk_mq_alloc_driver_tag(struct request *rq);
 
 static inline bool blk_mq_get_driver_tag(struct request *rq)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-       if (rq->tag != BLK_MQ_NO_TAG &&
-           !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
-               hctx->tags->rqs[rq->tag] = rq;
-               return true;
-       }
+       if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
+               return false;
 
-       return __blk_mq_get_driver_tag(hctx, rq);
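+       /*
+        * Active-request accounting already happened when the driver tag
+        * was allocated; just record the request in tags->rqs[].
+        */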
+       hctx->tags->rqs[rq->tag] = rq;
+       return true;
 }
 
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)