io_uring: move poll recycling later in compl flushing
author Pavel Begunkov <asml.silence@gmail.com>
Mon, 21 Mar 2022 22:02:23 +0000 (22:02 +0000)
committer Jens Axboe <axboe@kernel.dk>
Sun, 24 Apr 2022 23:34:16 +0000 (17:34 -0600)
There is a new (req->flags & REQ_F_POLLED) check in
__io_submit_flush_completions() for poll recycling; however,
io_free_batch_list() is a much better place for it. First, we prefer to
recycle after putting the last req ref, just to avoid potential problems
in the future. It also enables recycling for IOPOLL and places the check
closer to all the other req->flags bit cleanups.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/31dfe1dafda66ba3ce36b301884ec7e162c777d1.1647897811.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index fddfbf0f931030e7481738488ed648447a9f3a6c..c6746b41681696e658e50a6f101369ba8faebb02 100644
@@ -2635,6 +2635,15 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
                                if (!req_ref_put_and_test(req))
                                        continue;
                        }
+                       if ((req->flags & REQ_F_POLLED) && req->apoll) {
+                               struct async_poll *apoll = req->apoll;
+
+                               if (apoll->double_poll)
+                                       kfree(apoll->double_poll);
+                               list_add(&apoll->poll.wait.entry,
+                                               &ctx->apoll_cache);
+                               req->flags &= ~REQ_F_POLLED;
+                       }
                        io_queue_next(req);
                        if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
                                io_clean_op(req);
@@ -2673,15 +2682,6 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 
                        if (!(req->flags & REQ_F_CQE_SKIP))
                                __io_fill_cqe_req(req, req->result, req->cflags);
-                       if ((req->flags & REQ_F_POLLED) && req->apoll) {
-                               struct async_poll *apoll = req->apoll;
-
-                               if (apoll->double_poll)
-                                       kfree(apoll->double_poll);
-                               list_add(&apoll->poll.wait.entry,
-                                               &ctx->apoll_cache);
-                               req->flags &= ~REQ_F_POLLED;
-                       }
                }
 
                io_commit_cqring(ctx);
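
For context, the cached entries are consumed on the allocation side. A
minimal sketch of that counterpart path, assuming the allocation logic
in io_arm_poll_handler() from the io_uring code of this era (details
may differ between kernel versions):

	/*
	 * Sketch (not part of this commit): with the ring lock held,
	 * prefer a recycled async_poll entry from ctx->apoll_cache;
	 * otherwise fall back to a fresh atomic allocation.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    !list_empty(&ctx->apoll_cache)) {
		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
						poll.wait.entry);
		list_del_init(&apoll->poll.wait.entry);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return IO_APOLL_ABORTED;
	}
	apoll->double_poll = NULL;

Because the IOPOLL completion path calls io_free_batch_list() directly
rather than going through __io_submit_flush_completions(), moving the
recycling block is what makes these cached entries available to IOPOLL
requests as well.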