block: add a poll_fn callback to struct request_queue
author     Christoph Hellwig <hch@lst.de>	Thu, 2 Nov 2017 18:29:54 +0000 (21:29 +0300)
committer  Jens Axboe <axboe@kernel.dk>	Fri, 3 Nov 2017 16:31:48 +0000 (10:31 -0600)
That way we can also poll non-blk-mq queues.  Mostly needed for
the NVMe multipath code, but could also be useful elsewhere.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
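
For illustration, a bio-based (make_request-based) driver could wire up the
new callback roughly as follows.  This is a minimal sketch with made-up names;
the NVMe multipath consumer mentioned above is not part of this patch.

#include <linux/blkdev.h>

/* Poll the underlying hardware for completion of the given cookie. */
static bool example_poll(struct request_queue *q, blk_qc_t cookie)
{
	/* driver-specific completion check goes here */
	return false;
}

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	/* remap/submit the bio and return a pollable cookie */
	return BLK_QC_T_NONE;
}

static void example_setup_queue(struct request_queue *q)
{
	blk_queue_make_request(q, example_make_request);
	q->poll_fn = example_poll;	/* new hook added by this patch */
}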
block/blk-core.c
block/blk-mq.c
drivers/nvme/target/io-cmd.c
fs/block_dev.c
fs/direct-io.c
fs/iomap.c
include/linux/blkdev.h
mm/page_io.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 68cfe6780a9b7329ce5b95a3c7d5e8b4c64c7abe..395bfb10d65819a8602664312e1c152156f686b2 100644
@@ -2321,6 +2321,17 @@ blk_qc_t submit_bio(struct bio *bio)
 }
 EXPORT_SYMBOL(submit_bio);
 
+bool blk_poll(struct request_queue *q, blk_qc_t cookie)
+{
+       if (!q->poll_fn || !blk_qc_t_valid(cookie))
+               return false;
+
+       if (current->plug)
+               blk_flush_plug_list(current->plug, false);
+       return q->poll_fn(q, cookie);
+}
+EXPORT_SYMBOL_GPL(blk_poll);
+
 /**
  * blk_cloned_rq_check_limits - Helper function to check a cloned request
  *                              for new the queue limits
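
The fs/ and mm/ call sites converted below all follow the same pattern as
this sketch: submit the bio, then keep calling blk_poll() with the returned
cookie and only sleep when polling makes no progress.  Names here are
illustrative; "done" stands in for the caller's own completion flag.

#include <linux/blkdev.h>
#include <linux/sched.h>

static void example_polled_wait(struct block_device *bdev, struct bio *bio,
				bool *done)
{
	blk_qc_t cookie = submit_bio(bio);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		/* fall back to sleeping if polling made no progress */
		if (!blk_poll(bdev_get_queue(bdev), cookie))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}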
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e4d2490f4e7eed7fb288f93028c82798291ff0c3..95ea5889b8252ae4f34b93a342fd50eae13bdfe9 100644
@@ -37,6 +37,7 @@
 #include "blk-wbt.h"
 #include "blk-mq-sched.h"
 
+static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
@@ -2499,6 +2500,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        spin_lock_init(&q->requeue_lock);
 
        blk_queue_make_request(q, blk_mq_make_request);
+       if (q->mq_ops->poll)
+               q->poll_fn = blk_mq_poll;
 
        /*
         * Do this after blk_queue_make_request() overrides it...
@@ -2961,20 +2964,14 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return false;
 }
 
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 {
        struct blk_mq_hw_ctx *hctx;
-       struct blk_plug *plug;
        struct request *rq;
 
-       if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
-           !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+       if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return false;
 
-       plug = current->plug;
-       if (plug)
-               blk_flush_plug_list(plug, false);
-
        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
        if (!blk_qc_t_is_internal(cookie))
                rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
@@ -2992,7 +2989,6 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 
        return __blk_mq_poll(hctx, rq);
 }
-EXPORT_SYMBOL_GPL(blk_mq_poll);
 
 static int __init blk_mq_init(void)
 {
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0d4c23dc453247eb79c795895f0b829912ff18fd..db632818777dd4d49bfe1b096eaeb4fb50e9b962 100644
@@ -94,7 +94,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
        cookie = submit_bio(bio);
 
-       blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
+       blk_poll(bdev_get_queue(req->ns->bdev), cookie);
 }
 
 static void nvmet_execute_flush(struct nvmet_req *req)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 07ddccd1780173c5511e6178a68142dacf89b025..4afa4d5ff969e962bd16cc454904af0de20d5467 100644
@@ -237,7 +237,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_mq_poll(bdev_get_queue(bdev), qc))
+                   !blk_poll(bdev_get_queue(bdev), qc))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -402,7 +402,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                        break;
 
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_mq_poll(bdev_get_queue(bdev), qc))
+                   !blk_poll(bdev_get_queue(bdev), qc))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812ed0e5803ac9148a9ea8a27f00efca7f2a..d2bc339cb1e98cb8c29808758db912c0752580d4 100644
@@ -486,7 +486,7 @@ static struct bio *dio_await_one(struct dio *dio)
                dio->waiter = current;
                spin_unlock_irqrestore(&dio->bio_lock, flags);
                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie))
+                   !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
                        io_schedule();
                /* wake up sets us TASK_RUNNING */
                spin_lock_irqsave(&dio->bio_lock, flags);
diff --git a/fs/iomap.c b/fs/iomap.c
index 8194d30bdca08e9cfa244e3a1df1df219ae1b806..4241bac905b19bea8b66c17cd684ab28906743a7 100644
@@ -1049,7 +1049,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
-                           !blk_mq_poll(dio->submit.last_queue,
+                           !blk_poll(dio->submit.last_queue,
                                         dio->submit.cookie))
                                io_schedule();
                }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fddda6a1f9b587925f11c2492db3e246b56a6a6b..225617dd0a3f4b6b8ddb149371e3304faf1756cd 100644
@@ -266,6 +266,7 @@ struct blk_queue_ctx;
 
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
@@ -408,6 +409,7 @@ struct request_queue {
 
        request_fn_proc         *request_fn;
        make_request_fn         *make_request_fn;
+       poll_q_fn               *poll_fn;
        prep_rq_fn              *prep_rq_fn;
        unprep_rq_fn            *unprep_rq_fn;
        softirq_done_fn         *softirq_done_fn;
@@ -975,7 +977,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
diff --git a/mm/page_io.c b/mm/page_io.c
index 21502d341a67c2322054686d8f8adf2267a6ba34..ff04de630c465538b65bfd0f14dcf4ec1ea5548b 100644
@@ -407,7 +407,7 @@ int swap_readpage(struct page *page, bool do_poll)
                if (!READ_ONCE(bio->bi_private))
                        break;
 
-               if (!blk_mq_poll(disk->queue, qc))
+               if (!blk_poll(disk->queue, qc))
                        break;
        }
        __set_current_state(TASK_RUNNING);