block: improve readability of blk_mq_end_request_batch()
author    Jens Axboe <axboe@kernel.dk>
          Thu, 28 Oct 2021 18:08:34 +0000 (12:08 -0600)
committer Jens Axboe <axboe@kernel.dk>
          Thu, 28 Oct 2021 18:08:34 +0000 (12:08 -0600)
The loop is faster and easier to read if we tolerate cur_hctx being NULL
in the "when to flush" condition. Rename last_hctx to cur_hctx while at
it, as the new name better describes the variable's role.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c

index ec966e0b172d63af09060669a7066f886cbf9971..221d1b7d10d663f6a11272d06f112efa17e3e7cc 100644
@@ -822,7 +822,7 @@ static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
 void blk_mq_end_request_batch(struct io_comp_batch *iob)
 {
        int tags[TAG_COMP_BATCH], nr_tags = 0;
-       struct blk_mq_hw_ctx *last_hctx = NULL;
+       struct blk_mq_hw_ctx *cur_hctx = NULL;
        struct request *rq;
        u64 now = 0;
 
@@ -845,17 +845,17 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
                blk_pm_mark_last_busy(rq);
                rq_qos_done(rq->q, rq);
 
-               if (nr_tags == TAG_COMP_BATCH ||
-                   (last_hctx && last_hctx != rq->mq_hctx)) {
-                       blk_mq_flush_tag_batch(last_hctx, tags, nr_tags);
+               if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
+                       if (cur_hctx)
+                               blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
                        nr_tags = 0;
+                       cur_hctx = rq->mq_hctx;
                }
                tags[nr_tags++] = rq->tag;
-               last_hctx = rq->mq_hctx;
        }
 
        if (nr_tags)
-               blk_mq_flush_tag_batch(last_hctx, tags, nr_tags);
+               blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
 }
 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
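
For reference, below is a minimal, self-contained sketch of the pattern the
hunk above applies: accumulate tags per group and flush when the batch fills
or the group changes, tolerating a NULL "current group" so the per-item test
stays cheap in the common case. All names here (struct group, struct item,
flush_batch, GROUP_BATCH) are hypothetical stand-ins for the kernel types
and helpers, not actual blk-mq code.

	/* batch_sketch.c: userspace illustration of the batching pattern */
	#include <stdio.h>
	#include <stddef.h>

	#define GROUP_BATCH 4	/* stands in for TAG_COMP_BATCH */

	struct group {
		const char *name;	/* stands in for struct blk_mq_hw_ctx */
	};

	struct item {
		struct group *grp;	/* stands in for rq->mq_hctx */
		int tag;		/* stands in for rq->tag */
	};

	static void flush_batch(struct group *grp, int *tags, int nr)
	{
		/* plays the role of blk_mq_flush_tag_batch() */
		printf("flush %d tag(s) for group %s:", nr, grp->name);
		for (int i = 0; i < nr; i++)
			printf(" %d", tags[i]);
		printf("\n");
	}

	int main(void)
	{
		struct group a = { "A" }, b = { "B" };
		struct item items[] = {
			{ &a, 1 }, { &a, 2 }, { &a, 3 }, { &a, 4 },
			{ &a, 5 }, { &b, 6 }, { &b, 7 },
		};
		int tags[GROUP_BATCH], nr_tags = 0;
		struct group *cur_grp = NULL;	/* plays the role of cur_hctx */

		for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
			struct item *it = &items[i];

			/*
			 * Tolerating cur_grp == NULL keeps the per-item test
			 * to two cheap comparisons; the NULL check only runs
			 * on the (rare) flush path.
			 */
			if (nr_tags == GROUP_BATCH || cur_grp != it->grp) {
				if (cur_grp)
					flush_batch(cur_grp, tags, nr_tags);
				nr_tags = 0;
				cur_grp = it->grp;
			}
			tags[nr_tags++] = it->tag;
		}

		/* drain whatever is left after the loop */
		if (nr_tags)
			flush_batch(cur_grp, tags, nr_tags);
		return 0;
	}

With the sample input above this prints three flushes: tags 1-4 for group A
(batch full), tag 5 for A (group change), and tags 6-7 for B (final drain),
mirroring how the kernel loop drains the tail batch after the list walk.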