From b605a7fabb607adbe4ea7cc97b69ae6e0555e7b2 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 21 Mar 2022 22:02:23 +0000
Subject: [PATCH] io_uring: move poll recycling later in compl flushing

There is a new (req->flags & REQ_F_POLLED) check in
__io_submit_flush_completions() for poll recycling; however,
io_free_batch_list() is a much better place for it. First, we prefer to
do the recycling after putting the last req ref, just to avoid potential
problems in the future. It also enables the recycling for IOPOLL and
places it closer to where all the other req->flags bits are cleaned up.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/31dfe1dafda66ba3ce36b301884ec7e162c777d1.1647897811.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index fddfbf0f93103..c6746b4168169 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2635,6 +2635,15 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 			if (!req_ref_put_and_test(req))
 				continue;
 		}
+		if ((req->flags & REQ_F_POLLED) && req->apoll) {
+			struct async_poll *apoll = req->apoll;
+
+			if (apoll->double_poll)
+				kfree(apoll->double_poll);
+			list_add(&apoll->poll.wait.entry,
+					&ctx->apoll_cache);
+			req->flags &= ~REQ_F_POLLED;
+		}
 		io_queue_next(req);
 		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
 			io_clean_op(req);
@@ -2673,15 +2682,6 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 
 		if (!(req->flags & REQ_F_CQE_SKIP))
 			__io_fill_cqe_req(req, req->result, req->cflags);
-		if ((req->flags & REQ_F_POLLED) && req->apoll) {
-			struct async_poll *apoll = req->apoll;
-
-			if (apoll->double_poll)
-				kfree(apoll->double_poll);
-			list_add(&apoll->poll.wait.entry,
-					&ctx->apoll_cache);
-			req->flags &= ~REQ_F_POLLED;
-		}
 	}
 
 	io_commit_cqring(ctx);
-- 
2.30.2
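
Note (not part of the patch): for readers unfamiliar with the apoll cache the
moved block feeds, the sketch below is a minimal userspace model of the
recycle/reuse pattern: a completed request parks its async_poll entry on a
per-ring free list, and the next poll arming pops from that list before
falling back to an allocation. The names ring_ctx, recycle_apoll and
alloc_apoll are hypothetical, the singly linked free list is a simplification
of the kernel's list_head, and only the shape of the logic mirrors
ctx->apoll_cache; this is not the kernel implementation.

/*
 * Userspace model (assumed/simplified, not kernel code) of recycling an
 * async_poll entry into a per-ring cache and reusing it on the next arm.
 */
#include <stdio.h>
#include <stdlib.h>

struct async_poll {
	struct async_poll *next;	/* stands in for poll.wait.entry */
	void *double_poll;		/* second-level poll entry, freed on recycle */
};

struct ring_ctx {
	struct async_poll *apoll_cache;	/* stands in for the ctx->apoll_cache list */
};

/* Mirrors the block the patch moves into io_free_batch_list(): free the
 * secondary poll entry and park the primary one instead of freeing it. */
static void recycle_apoll(struct ring_ctx *ctx, struct async_poll *apoll)
{
	free(apoll->double_poll);
	apoll->double_poll = NULL;
	apoll->next = ctx->apoll_cache;
	ctx->apoll_cache = apoll;
}

/* Consumer side: prefer a cached entry when arming the next async poll. */
static struct async_poll *alloc_apoll(struct ring_ctx *ctx)
{
	struct async_poll *apoll = ctx->apoll_cache;

	if (apoll) {
		ctx->apoll_cache = apoll->next;
		return apoll;
	}
	return calloc(1, sizeof(*apoll));
}

int main(void)
{
	struct ring_ctx ctx = { .apoll_cache = NULL };
	struct async_poll *a = alloc_apoll(&ctx);	/* cache empty: allocate */

	recycle_apoll(&ctx, a);				/* request completes */
	struct async_poll *b = alloc_apoll(&ctx);	/* cache hit: same entry back */
	printf("reused cached entry: %s\n", a == b ? "yes" : "no");
	free(b);
	return 0;
}

In this model, moving recycle_apoll() to the very end of a request's teardown
(after the last reference is dropped) is what the patch does in the kernel: the
entry only ever enters the cache once no other path can still touch it.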