io_uring: kill unnecessary resubmit switch
author		Pavel Begunkov <asml.silence@gmail.com>
		Sun, 15 Aug 2021 09:40:21 +0000 (10:40 +0100)
committer	Jens Axboe <axboe@kernel.dk>
		Mon, 23 Aug 2021 19:10:37 +0000 (13:10 -0600)
Commit 773af69121ecc ("io_uring: always reissue from task_work context")
made all resubmission happen from task_work, so we no longer need the
hack with the resubmit/not-resubmit switch.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/47fa177cca04e5ffd308a35227966c8e15d8525b.1628981736.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 005fc06f89b9424a679cefe6c80aa390947bc955..d6e0e7e317da586d641140dd0ca7e27fc1dee20c 100644 (file)
@@ -2293,7 +2293,7 @@ static inline bool io_run_task_work(void)
  * Find and free completed poll iocbs
  */
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                              struct list_head *done, bool resubmit)
+                              struct list_head *done)
 {
        struct req_batch rb;
        struct io_kiocb *req;
@@ -2308,7 +2308,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
+               if (READ_ONCE(req->result) == -EAGAIN &&
                    !(req->flags & REQ_F_DONT_REISSUE)) {
                        req->iopoll_completed = 0;
                        io_req_task_queue_reissue(req);
@@ -2331,7 +2331,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                       long min, bool resubmit)
+                       long min)
 {
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
@@ -2371,7 +2371,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
        }
 
        if (!list_empty(&done))
-               io_iopoll_complete(ctx, nr_events, &done, resubmit);
+               io_iopoll_complete(ctx, nr_events, &done);
 
        return 0;
 }
@@ -2389,7 +2389,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
        while (!list_empty(&ctx->iopoll_list)) {
                unsigned int nr_events = 0;
 
-               io_do_iopoll(ctx, &nr_events, 0, false);
+               io_do_iopoll(ctx, &nr_events, 0);
 
                /* let it sleep and repeat later if can't complete a request */
                if (nr_events == 0)
@@ -2451,7 +2451,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                            list_empty(&ctx->iopoll_list))
                                break;
                }
-               ret = io_do_iopoll(ctx, &nr_events, min, true);
+               ret = io_do_iopoll(ctx, &nr_events, min);
        } while (!ret && nr_events < min && !need_resched());
 out:
        mutex_unlock(&ctx->uring_lock);
@@ -6857,7 +6857,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 
                mutex_lock(&ctx->uring_lock);
                if (!list_empty(&ctx->iopoll_list))
-                       io_do_iopoll(ctx, &nr_events, 0, true);
+                       io_do_iopoll(ctx, &nr_events, 0);
 
                /*
                 * Don't submit if refs are dying, good for io_uring_register(),
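
For illustration only, not kernel code: below is a minimal standalone C
sketch of the refactor pattern this commit applies. Once commit
773af69121ecc routes every -EAGAIN reissue through task_work, a
per-caller "resubmit" flag carries no information, so it can be dropped
from the completion helpers. All names in the sketch (fake_req,
queue_reissue, complete_iopoll) are hypothetical stand-ins for io_kiocb,
io_req_task_queue_reissue() and io_iopoll_complete().

/*
 * Standalone sketch (NOT kernel code) of the pattern in this commit:
 * once every -EAGAIN reissue is funneled through one task_work path,
 * the per-caller "bool resubmit" parameter is dead and can be removed.
 * fake_req/queue_reissue/complete_iopoll are made-up stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_EAGAIN (-11)	/* stand-in for -EAGAIN */

struct fake_req {
	int result;		/* completion result, e.g. FAKE_EAGAIN */
	bool dont_reissue;	/* stand-in for REQ_F_DONT_REISSUE */
};

/* Stand-in for io_req_task_queue_reissue(): defer the retry to task_work. */
static void queue_reissue(struct fake_req *req)
{
	printf("reissue queued via task_work (result=%d)\n", req->result);
}

/*
 * After the commit: no "bool resubmit" parameter.  Every caller gets
 * identical behavior because the reissue always runs from task_work.
 */
static void complete_iopoll(struct fake_req *req)
{
	if (req->result == FAKE_EAGAIN && !req->dont_reissue) {
		queue_reissue(req);
		return;
	}
	printf("completed, result=%d\n", req->result);
}

int main(void)
{
	struct fake_req ok    = { .result = 0, .dont_reissue = false };
	struct fake_req again = { .result = FAKE_EAGAIN, .dont_reissue = false };

	complete_iopoll(&ok);		/* normal completion path */
	complete_iopoll(&again);	/* deferred reissue path */
	return 0;
}

The same simplification is visible in the diff above: io_do_iopoll() and
io_iopoll_complete() each lose a parameter, and every call site passes
one argument fewer.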