io_uring: move common poll bits
author Pavel Begunkov <asml.silence@gmail.com>
Wed, 15 Dec 2021 22:08:46 +0000 (22:08 +0000)
committer Jens Axboe <axboe@kernel.dk>
Tue, 28 Dec 2021 17:51:14 +0000 (09:51 -0800)
Move some poll helpers/etc. up; we'll need them there shortly.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6c5c3dba24c86aad5cd389a54a8c7412e6a0621d.1639605189.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
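
For context, here is a rough sketch (not part of the patch) of how the moved helpers fit together when a poll handler is armed later in fs/io_uring.c. The example_arm_poll() wrapper below is hypothetical; io_poll_get_single(), io_init_poll_iocb() and io_poll_req_insert() are the helpers being moved in this commit:

	/* hypothetical wrapper, for illustration only */
	static void example_arm_poll(struct io_kiocb *req, __poll_t mask,
				     wait_queue_func_t wake_func)
	{
		struct io_poll_iocb *poll = io_poll_get_single(req);

		/* set ->events (plus the always-wanted error/hangup bits) and the wake callback */
		io_init_poll_iocb(poll, mask, wake_func);
		/* hash the request by user_data so poll cancellation can find it */
		io_poll_req_insert(req);
	}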
fs/io_uring.c

index 105593455775ccc51df8eb01401d60ef3ad8df88..8cabe4a0d38f49f139bcfcf9d57e28e961fcb328 100644
@@ -5353,6 +5353,43 @@ struct io_poll_table {
        int error;
 };
 
+static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
+{
+       /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
+       if (req->opcode == IORING_OP_POLL_ADD)
+               return req->async_data;
+       return req->apoll->double_poll;
+}
+
+static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
+{
+       if (req->opcode == IORING_OP_POLL_ADD)
+               return &req->poll;
+       return &req->apoll->poll;
+}
+
+static void io_poll_req_insert(struct io_kiocb *req)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       struct hlist_head *list;
+
+       list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
+       hlist_add_head(&req->hash_node, list);
+}
+
+static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
+                             wait_queue_func_t wake_func)
+{
+       poll->head = NULL;
+       poll->done = false;
+       poll->canceled = false;
+#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
+       /* mask in events that we always want/need */
+       poll->events = events | IO_POLL_UNMASK;
+       INIT_LIST_HEAD(&poll->wait.entry);
+       init_waitqueue_func_entry(&poll->wait, wake_func);
+}
+
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
                           __poll_t mask, io_req_tw_func_t func)
 {
@@ -5401,21 +5438,6 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
        return false;
 }
 
-static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
-{
-       /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
-       if (req->opcode == IORING_OP_POLL_ADD)
-               return req->async_data;
-       return req->apoll->double_poll;
-}
-
-static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
-{
-       if (req->opcode == IORING_OP_POLL_ADD)
-               return &req->poll;
-       return &req->apoll->poll;
-}
-
 static void io_poll_remove_double(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
 {
@@ -5530,19 +5552,6 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
        return 1;
 }
 
-static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
-                             wait_queue_func_t wake_func)
-{
-       poll->head = NULL;
-       poll->done = false;
-       poll->canceled = false;
-#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
-       /* mask in events that we always want/need */
-       poll->events = events | IO_POLL_UNMASK;
-       INIT_LIST_HEAD(&poll->wait.entry);
-       init_waitqueue_func_entry(&poll->wait, wake_func);
-}
-
 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll_iocb **poll_ptr)
@@ -5640,15 +5649,6 @@ static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
 }
 
-static void io_poll_req_insert(struct io_kiocb *req)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-       struct hlist_head *list;
-
-       list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
-       hlist_add_head(&req->hash_node, list);
-}
-
 static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
                                      struct io_poll_iocb *poll,
                                      struct io_poll_table *ipt, __poll_t mask,