blk-mq: defer to the normal submission path for non-flush flush commands
author Christoph Hellwig <hch@lst.de>
Fri, 19 May 2023 04:40:46 +0000 (06:40 +0200)
committer Jens Axboe <axboe@kernel.dk>
Sat, 20 May 2023 01:52:29 +0000 (19:52 -0600)
If blk_insert_flush decides that a command does not need to use the
flush state machine, return false and let blk_mq_submit_bio handle
it the normal way (including using an I/O scheduler) instead of doing
a bypass insert.
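
As a rough illustration of the new contract (this is not kernel code; the
types, flags, and helpers below are simplified userspace stand-ins): the
flush code either consumes the request and returns true, or returns false
so the caller continues down the normal submission path.

/*
 * Minimal userspace model of the calling convention described above.
 * insert_flush() plays the role of blk_insert_flush(), submit() the role
 * of blk_mq_submit_bio(); the request layout and policy bits are made up
 * for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_FSEQ_PREFLUSH   (1 << 0)
#define REQ_FSEQ_DATA       (1 << 1)
#define REQ_FSEQ_POSTFLUSH  (1 << 2)

struct request {
	unsigned int flush_policy;	/* what the flush machinery must do */
};

/* Returns true if the flush machinery consumed @rq, false otherwise. */
static bool insert_flush(struct request *rq)
{
	switch (rq->flush_policy) {
	case 0:
		/* nothing to do: complete the request immediately (consumed) */
		printf("completed empty flush\n");
		return true;
	case REQ_FSEQ_DATA:
		/* data but no flush needed: hand it back to the caller */
		return false;
	default:
		/* needs a flush sequence: queue it on the flush machinery */
		printf("queued on flush state machine\n");
		return true;
	}
}

static void submit(struct request *rq, bool is_flush)
{
	if (is_flush && insert_flush(rq))
		return;			/* consumed by the flush code */
	/* normal path: plugging, I/O scheduler, direct issue, ... */
	printf("normal submission path\n");
}

int main(void)
{
	struct request data_only = { .flush_policy = REQ_FSEQ_DATA };
	struct request full = { .flush_policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA };

	submit(&data_only, true);	/* falls through to normal submission */
	submit(&full, true);		/* consumed by the flush machinery */
	return 0;
}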

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230519044050.107790-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h
block/blk.h

index d8144f1f6fb12fc4c1a9c39995b1dc2c26171dde..6fb9cf2d38184bcafacdab42f4ed5d90ae22fd08 100644 (file)
@@ -385,22 +385,17 @@ static void blk_rq_init_flush(struct request *rq)
        rq->end_io = mq_flush_data_end_io;
 }
 
-/**
- * blk_insert_flush - insert a new PREFLUSH/FUA request
- * @rq: request to insert
- *
- * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
- * or __blk_mq_run_hw_queue() to dispatch request.
- * @rq is being submitted.  Analyze what needs to be done and put it on the
- * right queue.
+/*
+ * Insert a PREFLUSH/FUA request into the flush state machine.
+ * Returns true if the request has been consumed by the flush state machine,
+ * or false if the caller should continue to process it.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
-       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        /* FLUSH/FUA request must never be merged */
        WARN_ON_ONCE(rq->bio != rq->biotail);
@@ -429,16 +424,14 @@ void blk_insert_flush(struct request *rq)
                 * complete the request.
                 */
                blk_mq_end_request(rq, 0);
-               return;
+               return true;
        case REQ_FSEQ_DATA:
                /*
                 * If there's data, but no flush is necessary, the request can
                 * be processed directly without going through flush machinery.
                 * Queue for normal execution.
                 */
-               blk_mq_request_bypass_insert(rq, 0);
-               blk_mq_run_hw_queue(hctx, false);
-               return;
+               return false;
        default:
                /*
                 * Mark the request as part of a flush sequence and submit it
@@ -448,6 +441,7 @@ void blk_insert_flush(struct request *rq)
                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
+               return true;
        }
 }
 
index e021740154feaeedf0496f0f7115fb8de8b565ae..c0b394096b6b6bb4be7bf3cddce3399ab05b1aad 100644 (file)
@@ -45,6 +45,8 @@
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
+static void blk_mq_request_bypass_insert(struct request *rq,
+               blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list);
 
@@ -2430,7 +2432,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
+static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
@@ -2977,10 +2979,8 @@ void blk_mq_submit_bio(struct bio *bio)
                return;
        }
 
-       if (op_is_flush(bio->bi_opf)) {
-               blk_insert_flush(rq);
+       if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
                return;
-       }
 
        if (plug) {
                blk_add_rq_to_plug(plug, rq);
index d15981db34b958c71eee1b4bdad82c4a01da98e9..ec7d2fb0b3c8ef36b68dfeb66f51da42cc68c9da 100644 (file)
@@ -64,10 +64,6 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);
-/*
- * Internal helpers for request insertion into sw queues
- */
-void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
 
 /*
  * CPU -> queue mappings
index 45547bcf111938db1fcd7744f8a90641b56c54ad..9f171b8f1e3402d938c3545db3caac8f7baf4c98 100644 (file)
@@ -269,7 +269,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
 void elevator_disable(struct request_queue *q);