io_uring: add CQE32 setup processing
author     Stefan Roesch <shr@fb.com>
           Tue, 26 Apr 2022 18:21:26 +0000 (11:21 -0700)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 9 May 2022 12:35:34 +0000 (06:35 -0600)
This adds two new functions to set up and fill the CQE32 result structure.

Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220426182134.136504-5-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
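
For context only (not part of this patch): a minimal userspace sketch of how a
ring created with IORING_SETUP_CQE32 would consume the two extra result words
that the helpers below write into the CQE trailer. It assumes liburing
(io_uring_queue_init_params(), io_uring_wait_cqe()) and headers new enough to
expose the big_cqe[] member; the submission step is elided.

	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring_params p = { .flags = IORING_SETUP_CQE32 };
		struct io_uring ring;
		struct io_uring_cqe *cqe;

		if (io_uring_queue_init_params(8, &ring, &p))
			return 1;

		/* ... prepare and submit a CQE32-capable request here ... */

		if (!io_uring_wait_cqe(&ring, &cqe)) {
			/* extra1/extra2 land in the 16-byte CQE trailer */
			printf("extra1=%llu extra2=%llu\n",
			       (unsigned long long)cqe->big_cqe[0],
			       (unsigned long long)cqe->big_cqe[1]);
			io_uring_cqe_seen(&ring, cqe);
		}

		io_uring_queue_exit(&ring);
		return 0;
	}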
fs/io_uring.c

index 279ccf674bdd4dd7073a0de6bcb5adbfc83fdaf9..62434abf914cc57de0f44c76822584ada5c97e94 100644
@@ -2335,12 +2335,70 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
                                        req->cqe.res, req->cqe.flags);
 }
 
+static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
+                                             struct io_kiocb *req)
+{
+       struct io_uring_cqe *cqe;
+       u64 extra1 = req->extra1;
+       u64 extra2 = req->extra2;
+
+       trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+                               req->cqe.res, req->cqe.flags);
+
+       /*
+        * If we can't get a cq entry, userspace overflowed the
+        * submission (by quite a lot). Increment the overflow count in
+        * the ring.
+        */
+       cqe = io_get_cqe(ctx);
+       if (likely(cqe)) {
+               memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
+               cqe->big_cqe[0] = extra1;
+               cqe->big_cqe[1] = extra2;
+               return true;
+       }
+
+       return io_cqring_event_overflow(ctx, req->cqe.user_data,
+                                       req->cqe.res, req->cqe.flags);
+}
+
 static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
 {
        trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags);
        return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
 }
 
+static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
+                               u64 extra1, u64 extra2)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_uring_cqe *cqe;
+
+       if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
+               return;
+       if (req->flags & REQ_F_CQE_SKIP)
+               return;
+
+       trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags);
+
+       /*
+        * If we can't get a cq entry, userspace overflowed the
+        * submission (by quite a lot). Increment the overflow count in
+        * the ring.
+        */
+       cqe = io_get_cqe(ctx);
+       if (likely(cqe)) {
+               WRITE_ONCE(cqe->user_data, req->cqe.user_data);
+               WRITE_ONCE(cqe->res, res);
+               WRITE_ONCE(cqe->flags, cflags);
+               WRITE_ONCE(cqe->big_cqe[0], extra1);
+               WRITE_ONCE(cqe->big_cqe[1], extra2);
+               return;
+       }
+
+       io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags);
+}
+
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
                                     s32 res, u32 cflags)
 {
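
A rough sketch of how a completion path might invoke the new helper once later
patches in the series wire it up to callers (hypothetical function name and
placeholder cflags; locking and the usual cqring commit/wakeup are elided):

	/* hypothetical caller, not part of this patch */
	static void io_req_complete_big(struct io_kiocb *req, s32 res,
					u64 extra1, u64 extra2)
	{
		/* fills user_data/res/flags plus the 16-byte CQE32 trailer */
		__io_fill_cqe32_req(req, res, 0, extra1, extra2);
	}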