io_uring/rw: mark readv/writev as vectored in the opcode definition
authorJens Axboe <axboe@kernel.dk>
Mon, 11 Sep 2023 19:46:07 +0000 (13:46 -0600)
committerJens Axboe <axboe@kernel.dk>
Thu, 21 Sep 2023 18:00:46 +0000 (12:00 -0600)
This is cleaner than gating on the opcode type, particularly as more
read/write type opcodes may be added.

Then we can use that for the data import, and in __io_read() to decide
whether or not we need to copy state.

Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/opdef.c
io_uring/opdef.h
io_uring/rw.c

index 3b9c6489b8b6d2732b1e349b1184a09a367c9f8b..f4090406550070c520f1d55c1bc19a62f0172f37 100644 (file)
@@ -63,6 +63,7 @@ const struct io_issue_def io_issue_defs[] = {
                .ioprio                 = 1,
                .iopoll                 = 1,
                .iopoll_queue           = 1,
+               .vectored               = 1,
                .prep                   = io_prep_rw,
                .issue                  = io_read,
        },
@@ -76,6 +77,7 @@ const struct io_issue_def io_issue_defs[] = {
                .ioprio                 = 1,
                .iopoll                 = 1,
                .iopoll_queue           = 1,
+               .vectored               = 1,
                .prep                   = io_prep_rw,
                .issue                  = io_write,
        },
index c22c8696e749ba7098bf4ad77f67bdececd98740..9e5435ec27d00f58ef74f5bbdfc188a6a35d2e4b 100644 (file)
@@ -29,6 +29,8 @@ struct io_issue_def {
        unsigned                iopoll_queue : 1;
        /* opcode specific path will handle ->async_data allocation if needed */
        unsigned                manual_alloc : 1;
+       /* vectored opcode, set if 1) vectored, and 2) handler needs to know */
+       unsigned                vectored : 1;
 
        int (*issue)(struct io_kiocb *, unsigned int);
        int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
index 5c58962d73dac25583b896614eba5bba413128cd..83ae911c2868cafa7a996ea54ae7bd298205c5c5 100644 (file)
@@ -388,8 +388,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;
 
-       if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
-           (req->flags & REQ_F_BUFFER_SELECT)) {
+       if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
@@ -776,8 +775,11 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 
        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
-               /* if we can poll, just do that */
-               if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
+               /*
+                * If we can poll, just do that. For a vectored read, we'll
+                * need to copy state first.
+                */
+               if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))