block/null_blk: Fix double blk_mq_start_request() warning
authorChengming Zhou <zhouchengming@bytedance.com>
Mon, 20 Nov 2023 03:25:21 +0000 (03:25 +0000)
committerJens Axboe <axboe@kernel.dk>
Mon, 20 Nov 2023 17:26:26 +0000 (10:26 -0700)
When CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is enabled, null_queue_rq()
would return BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE for the request,
which has been marked as MQ_RQ_IN_FLIGHT by blk_mq_start_request().

Then null_queue_rqs() puts these requests in the rqlist and returns back to
the block layer core, which would try to queue them individually again,
so the warning in blk_mq_start_request() is triggered.

Fix it by splitting null_queue_rq() into two parts: the first is the
preparation of the request, the second is the handling of the request. We
put the blk_mq_start_request() after the preparation part, which may fail
and return back to the block layer core.

The throttling also belongs to the preparation part, so move it before
blk_mq_start_request(). And change the return type of null_handle_cmd()
to void, since it always returns BLK_STS_OK now.

Reported-by: <syzbot+fcc47ba2476570cbbeb0@syzkaller.appspotmail.com>
Closes: https://lore.kernel.org/all/0000000000000e6aac06098aee0c@google.com/
Fixes: d78bfa1346ab ("block/null_blk: add queue_rqs() support")
Suggested-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Link: https://lore.kernel.org/r/20231120032521.1012037-1-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/null_blk/main.c

index 22a3cf7f32e2335d87710dfa32f0c286ae1768aa..3021d58ca51c1ff4dc223446b3bd9a3fb0b685f9 100644 (file)
@@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
        return BLK_STS_OK;
 }
 
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
-                                   sector_t nr_sectors, enum req_op op)
+static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+                           sector_t nr_sectors, enum req_op op)
 {
        struct nullb_device *dev = cmd->nq->dev;
        struct nullb *nullb = dev->nullb;
        blk_status_t sts;
 
-       if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
-               sts = null_handle_throttled(cmd);
-               if (sts != BLK_STS_OK)
-                       return sts;
-       }
-
        if (op == REQ_OP_FLUSH) {
                cmd->error = errno_to_blk_status(null_handle_flush(nullb));
                goto out;
@@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
 
 out:
        nullb_complete_cmd(cmd);
-       return BLK_STS_OK;
 }
 
 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        cmd->fake_timeout = should_timeout_request(rq) ||
                blk_should_fake_timeout(rq->q);
 
-       blk_mq_start_request(rq);
-
        if (should_requeue_request(rq)) {
                /*
                 * Alternate between hitting the core BUSY path, and the
@@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                return BLK_STS_OK;
        }
 
+       if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
+               blk_status_t sts = null_handle_throttled(cmd);
+
+               if (sts != BLK_STS_OK)
+                       return sts;
+       }
+
+       blk_mq_start_request(rq);
+
        if (is_poll) {
                spin_lock(&nq->poll_lock);
                list_add_tail(&rq->queuelist, &nq->poll_list);
@@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (cmd->fake_timeout)
                return BLK_STS_OK;
 
-       return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+       null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+       return BLK_STS_OK;
 }
 
 static void null_queue_rqs(struct request **rqlist)