	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}
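A side note on the context line above: cached_cq_tail and cq.head are free-running unsigned counters, masked only when actually indexing into the ring, so the subtraction counts pending CQEs correctly even after either counter wraps. A minimal standalone sketch of that property (counter values invented for illustration):

#include <stdio.h>

int main(void)
{
	/* free-running 32-bit counters straddling the wrap point */
	unsigned int head = 4294967290u;	/* UINT_MAX - 5 */
	unsigned int tail = head + 8;		/* wraps around to 2 */

	/* unsigned modular arithmetic still yields the true distance */
	printf("pending = %u\n", tail - head);	/* prints 8 */
	return 0;
}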
-static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
+static inline struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;
	atomic_inc(&req->refs);
}
-static bool io_cqring_fill_event(struct io_kiocb *req, long res,
-				 unsigned int cflags)
+static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
+				     unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_cqe *cqe;
-
-	trace_io_uring_complete(ctx, req->user_data, res, cflags);
-
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqring(ctx);
-	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->user_data);
-		WRITE_ONCE(cqe->res, res);
-		WRITE_ONCE(cqe->flags, cflags);
-		return true;
-	}
	if (!atomic_read(&req->task->io_uring->in_idle)) {
		struct io_overflow_cqe *ocqe;
	return false;
}
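Context for the hunk above: the body elided between the ocqe declaration and the final return is the slow path proper, where (in the tree this patch targets) an io_overflow_cqe is allocated and queued on the context's overflow list so a full CQ ring loses no completions; only a task already flagged as in_idle gives up and lets the event drop. A rough standalone sketch of that stash-on-overflow pattern, with invented names (my_ring, my_overflow_entry, my_stash_overflow) rather than the kernel's:

#include <stdbool.h>
#include <stdlib.h>

struct my_cqe {
	unsigned long long user_data;
	long res;
	unsigned flags;
};

struct my_overflow_entry {
	struct my_overflow_entry *next;
	struct my_cqe cqe;
};

struct my_ring {
	struct my_overflow_entry *overflow_list;	/* fallback storage */
};

/* stash a completion that did not fit into the CQ ring */
static bool my_stash_overflow(struct my_ring *ring, unsigned long long ud,
			      long res, unsigned flags)
{
	struct my_overflow_entry *e = malloc(sizeof(*e));

	if (!e)
		return false;	/* out of memory: the event is dropped */
	e->cqe.user_data = ud;
	e->cqe.res = res;
	e->cqe.flags = flags;
	e->next = ring->overflow_list;
	ring->overflow_list = e;
	return true;
}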
+static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
+					  unsigned int cflags)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_uring_cqe *cqe;
+
+	trace_io_uring_complete(ctx, req->user_data, res, cflags);
+
+	/*
+	 * If we can't get a cq entry, userspace overflowed the
+	 * submission (by quite a lot). Increment the overflow count in
+	 * the ring.
+	 */
+	cqe = io_get_cqring(ctx);
+	if (likely(cqe)) {
+		WRITE_ONCE(cqe->user_data, req->user_data);
+		WRITE_ONCE(cqe->res, res);
+		WRITE_ONCE(cqe->flags, cflags);
+		return true;
+	}
+	return io_cqring_event_overflow(req, res, cflags);
+}
+
+/* not as hot to bloat with inlining */
+static noinline bool io_cqring_fill_event(struct io_kiocb *req, long res,
+					  unsigned int cflags)
+{
+	return __io_cqring_fill_event(req, res, cflags);
+}
+
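The pair of definitions above is a common size/speed split: the hot body is a static inline so the frequent callers below absorb it directly, while the seldom-used standalone entry point is marked noinline so the compiler doesn't clone the body a second time behind the wrapper. A generic sketch of the pattern, independent of io_uring (noinline is the kernel's macro; the #define below stands in for it outside the kernel):

/* outside the kernel, spell the attribute out ourselves */
#define noinline __attribute__((noinline))

/* hot path: small enough that inlining into every caller pays off */
static inline int __do_thing(int x)
{
	return x * 2 + 1;
}

/* cold out-of-line entry point for the infrequent callers */
static noinline int do_thing(int x)
{
	return __do_thing(x);
}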
static void io_req_complete_post(struct io_kiocb *req, long res,
				 unsigned int cflags)
{
	unsigned long flags;
	spin_lock_irqsave(&ctx->completion_lock, flags);
-	io_cqring_fill_event(req, res, cflags);
+	__io_cqring_fill_event(req, res, cflags);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];
-		io_cqring_fill_event(req, req->result, req->compl.cflags);
+		__io_cqring_fill_event(req, req->result, req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
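Worth noting in the flush hunk above: completion_lock is taken once for the whole batch, every cached request is filled through the inline helper, and a single io_commit_cqring() then publishes the new tail to userspace. A hedged, single-producer sketch of that fill-then-publish shape in C11 atomics (struct and function names invented for illustration):

#include <stdatomic.h>

struct cqe {
	unsigned long long user_data;
	long res;
	unsigned flags;
};

struct cq {
	_Atomic unsigned tail;	/* the only field the consumer polls */
	unsigned cached_tail;	/* producer-private shadow of tail */
	unsigned ring_mask;	/* ring size minus one, size a power of two */
	struct cqe *cqes;
};

/* fill a whole batch, then make it visible with one release store */
static void flush_batch(struct cq *cq, const struct cqe *batch, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		cq->cqes[cq->cached_tail++ & cq->ring_mask] = batch[i];

	/* pairs with the consumer's acquire load of tail */
	atomic_store_explicit(&cq->tail, cq->cached_tail,
			      memory_order_release);
}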
		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_rw_kbuf(req);
-		io_cqring_fill_event(req, req->result, cflags);
+		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;
		if (req_ref_put_and_test(req))