blk-mq: fold blk_mq_sched_insert_requests into blk_mq_dispatch_plug_list
author Christoph Hellwig <hch@lst.de>
Thu, 13 Apr 2023 06:40:42 +0000 (08:40 +0200)
committer Jens Axboe <axboe@kernel.dk>
Thu, 13 Apr 2023 12:52:29 +0000 (06:52 -0600)
blk_mq_dispatch_plug_list is the only caller of
blk_mq_sched_insert_requests, and it makes sense to just fold it there
as blk_mq_sched_insert_requests isn't specific to I/O schedulers despite
the name.
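
For illustration only, here is a minimal user-space sketch of the control
flow blk_mq_dispatch_plug_list ends up with after the fold. None of this
is kernel code: the structs, the int-array request list, and the plain
counter standing in for q_usage_counter are simplified stand-ins chosen
purely to show the shape of the logic.

    #include <stdbool.h>
    #include <stdio.h>

    struct hw_ctx;

    /* Stand-in for the elevator ops table; only the op we need. */
    struct elevator_ops {
            void (*insert_requests)(struct hw_ctx *hctx, int *list,
                                    bool at_head);
    };

    /* Stand-in queue: a plain int instead of a percpu_ref. */
    struct queue {
            int usage_counter;
            struct elevator_ops *elevator;  /* NULL: no I/O scheduler */
    };

    struct hw_ctx {
            struct queue *queue;
    };

    static void run_hw_queue(struct hw_ctx *hctx, bool async)
    {
            (void)hctx;
            printf("run hw queue, async=%d\n", async);
    }

    /* Stand-in for blk_mq_insert_requests(), now static in blk-mq.c. */
    static void insert_requests_direct(struct hw_ctx *hctx, int *list,
                                       bool async)
    {
            printf("direct insert of request %d\n", list[0]);
            run_hw_queue(hctx, async);
    }

    /*
     * The folded path: take a queue usage reference so the queue cannot
     * be released underneath us, route the list through the elevator if
     * one is attached, otherwise insert directly, then drop the
     * reference.
     */
    static void dispatch_plug_list(struct hw_ctx *hctx, int *list,
                                   bool from_sched)
    {
            hctx->queue->usage_counter++;           /* percpu_ref_get() */
            if (hctx->queue->elevator) {
                    hctx->queue->elevator->insert_requests(hctx, list,
                                                           false);
                    run_hw_queue(hctx, from_sched);
            } else {
                    insert_requests_direct(hctx, list, from_sched);
            }
            hctx->queue->usage_counter--;           /* percpu_ref_put() */
    }

    int main(void)
    {
            struct queue q = { .usage_counter = 0, .elevator = NULL };
            struct hw_ctx hctx = { .queue = &q };
            int list[1] = { 42 };

            dispatch_plug_list(&hctx, list, false);
            return 0;
    }

With the helper gone, the scheduler-vs-direct decision and the
usage-counter get/put pair sit directly in the dispatch path rather than
behind a name that wrongly suggested it was scheduler-only.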

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/blk-mq.h
block/mq-deadline.c

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 811a9765b745c08ec8ec2d9c71353f76990eaf9d..9c0d231722d9ce52d81e68f9113e75cd4b412830 100644
@@ -455,30 +455,6 @@ run:
                blk_mq_run_hw_queue(hctx, async);
 }
 
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-                                 struct blk_mq_ctx *ctx,
-                                 struct list_head *list, bool run_queue_async)
-{
-       struct elevator_queue *e;
-       struct request_queue *q = hctx->queue;
-
-       /*
-        * blk_mq_sched_insert_requests() is called from flush plug
-        * context only, and hold one usage counter to prevent queue
-        * from being released.
-        */
-       percpu_ref_get(&q->q_usage_counter);
-
-       e = hctx->queue->elevator;
-       if (e) {
-               e->type->ops.insert_requests(hctx, list, false);
-               blk_mq_run_hw_queue(hctx, run_queue_async);
-       } else {
-               blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
-       }
-       percpu_ref_put(&q->q_usage_counter);
-}
-
 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 65cab6e475be8eb9fff5fdec133b8401c9a34994..1ec01e9934dc45a5ddd67e69a9073945d4ca1c23 100644
@@ -18,9 +18,6 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-                                 struct blk_mq_ctx *ctx,
-                                 struct list_head *list, bool run_queue_async);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 536f001282bb6375227bed61c591ff5da2472fc1..f1da4f053cc6916417b5a891b4b69c716359c2cb 100644
@@ -2497,9 +2497,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                blk_mq_run_hw_queue(hctx, false);
 }
 
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                           struct list_head *list, bool run_queue_async)
-
+static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
+               struct blk_mq_ctx *ctx, struct list_head *list,
+               bool run_queue_async)
 {
        struct request *rq;
        enum hctx_type type = hctx->type;
@@ -2725,7 +2725,16 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 
        plug->mq_list = requeue_list;
        trace_block_unplug(this_hctx->queue, depth, !from_sched);
-       blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+
+       percpu_ref_get(&this_hctx->queue->q_usage_counter);
+       if (this_hctx->queue->elevator) {
+               this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
+                               &list, false);
+               blk_mq_run_hw_queue(this_hctx, from_sched);
+       } else {
+               blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
+       }
+       percpu_ref_put(&this_hctx->queue->q_usage_counter);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5d551f9ef2d6bec3a3909b027bb3ce189504965c..bd7ae5e67a526b7dd3ce8cd57755896f5a17028b 100644
@@ -69,8 +69,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                               struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index af9e79050dcc1fcc3f4f555da8b950d0aee506b8..d62a3039c8e04f188b91baf594e9499d29767e07 100644
@@ -820,7 +820,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 /*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ * Called from blk_mq_sched_insert_request() or blk_mq_dispatch_plug_list().
  */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)