io_uring: optimise submission loop invariant
authorPavel Begunkov <asml.silence@gmail.com>
Tue, 12 Apr 2022 14:09:49 +0000 (15:09 +0100)
committerJens Axboe <axboe@kernel.dk>
Mon, 25 Apr 2022 00:02:46 +0000 (18:02 -0600)
Instead of keeping @submitted in io_submit_sqes(), which requires comparing
it against the initial number of SQEs on every iteration, store the number
of SQEs left to submit. The original nr is then needed only once we're done
with SQE handling.

Note: if we can't allocate a req for the first SQE, we have always returned
-EAGAIN to userspace; preserve this behaviour by checking the request cache
in the slow path.
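
A minimal sketch of the accounting change (simplified pseudocode, not the
exact kernel code; the real diff is below). It assumes nr has already been
clamped to the available SQEs:

	/* before: count up, compare against nr on every iteration */
	int submitted = 0;
	do {
		...
		submitted++;
	} while (submitted < nr);

	/* after: count down, the loop test no longer touches nr */
	unsigned int left = nr;
	do {
		...
		left--;
	} while (left);
	ret = nr - left;	/* computed once, only after the loop */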

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c3b3df9aeae4c2f7a53fd8386385742e4e261e77.1649771823.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 1cdfcdd0a74ff1368bd15f7b6b6d079f79a7b351..3e84963b14ff7cdc36b153c9084ffab056d5ed6c 100644 (file)
@@ -7855,24 +7855,22 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
        __must_hold(&ctx->uring_lock)
 {
        unsigned int entries = io_sqring_entries(ctx);
-       int submitted = 0;
+       unsigned int left;
+       int ret;
 
        if (unlikely(!entries))
                return 0;
        /* make sure SQ entry isn't read before tail */
-       nr = min3(nr, ctx->sq_entries, entries);
-       io_get_task_refs(nr);
+       ret = left = min3(nr, ctx->sq_entries, entries);
+       io_get_task_refs(left);
+       io_submit_state_start(&ctx->submit_state, left);
 
-       io_submit_state_start(&ctx->submit_state, nr);
        do {
                const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
 
-               if (unlikely(!io_alloc_req_refill(ctx))) {
-                       if (!submitted)
-                               submitted = -EAGAIN;
+               if (unlikely(!io_alloc_req_refill(ctx)))
                        break;
-               }
                req = io_alloc_req(ctx);
                sqe = io_get_sqe(ctx);
                if (unlikely(!sqe)) {
@@ -7880,7 +7878,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
                        break;
                }
                /* will complete beyond this point, count as submitted */
-               submitted++;
+               left--;
                if (io_submit_sqe(ctx, req, sqe)) {
                        /*
                         * Continue submitting even for sqe failure if the
@@ -7889,20 +7887,20 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
                        if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL))
                                break;
                }
-       } while (submitted < nr);
+       } while (left);
 
-       if (unlikely(submitted != nr)) {
-               int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
-               int unused = nr - ref_used;
-
-               current->io_uring->cached_refs += unused;
+       if (unlikely(left)) {
+               ret -= left;
+               /* try again if it submitted nothing and can't allocate a req */
+               if (!ret && io_req_cache_empty(ctx))
+                       ret = -EAGAIN;
+               current->io_uring->cached_refs += left;
        }
 
        io_submit_state_end(ctx);
         /* Commit SQ ring head once we've consumed and submitted all SQEs */
        io_commit_sqring(ctx);
-
-       return submitted;
+       return ret;
 }
 
 static inline bool io_sqd_events_pending(struct io_sq_data *sqd)