static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx);
+static bool io_poll_remove_waitqs(struct io_kiocb *req);
static int io_req_prep_async(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
	atomic_inc(&req->refs);
}
-static void __io_cqring_fill_event(struct io_kiocb *req, long res,
+static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
				   unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
-		return;
+		return true;
	}
	if (!ctx->cq_overflow_flushed &&
	    !atomic_read(&req->task->io_uring->in_idle)) {
		ocqe->cqe.res = res;
		ocqe->cqe.flags = cflags;
		list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
-		return;
+		return true;
	}
overflow:
	/*
	 * on the floor.
	 */
	WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
+	return false;
}
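With the bool return, the exits of __io_cqring_fill_event() now report where the completion ended up: true if the CQE was written straight into the CQ ring or parked on ctx->cq_overflow_list (from where it is flushed into the ring later), false on the fallthrough path that only bumps the overflow counter, in which case the CQE itself is lost. A sketch of the calling pattern this enables follows; io_poll_post_event() is a hypothetical helper for illustration only, not part of the patch, and it assumes the caller holds the ring's completion lock as the real completion paths do.

/*
 * Hypothetical helper, for illustration only; assumes the caller holds
 * the ring's completion lock.
 */
static void io_poll_post_event(struct io_kiocb *req, long res)
{
	if (!__io_cqring_fill_event(req, res, IORING_CQE_F_MORE)) {
		/*
		 * Neither the CQ ring nor the overflow list could take the
		 * CQE, so it was dropped. Force one-shot semantics rather
		 * than leaving a multishot request armed while its events
		 * are being lost.
		 */
		req->poll.events |= EPOLLONESHOT;
	}
}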
static void io_cqring_fill_event(struct io_kiocb *req, long res)
		error = -ECANCELED;
		req->poll.events |= EPOLLONESHOT;
	}
-	if (error || (req->poll.events & EPOLLONESHOT)) {
-		io_poll_remove_double(req);
+	if (!error)
+		error = mangle_poll(mask);
+	if (!__io_cqring_fill_event(req, error, flags) ||
+	    (req->poll.events & EPOLLONESHOT)) {
+		io_poll_remove_waitqs(req);
		req->poll.done = true;
		flags = 0;
	}
-	if (!error)
-		error = mangle_poll(mask);
-	__io_cqring_fill_event(req, error, flags);
	io_commit_cqring(ctx);
	return !(flags & IORING_CQE_F_MORE);
}
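Two things happen in this completion-side hunk (presumably io_poll_complete(), judging from the mask/error/flags usage). First, mangle_poll() is hoisted above the teardown block because the CQE is now filled as part of the teardown condition itself, so the posted result has to be final at that point. Second, the teardown no longer keys only off an explicit error or EPOLLONESHOT: it also fires when __io_cqring_fill_event() returns false, i.e. when the CQE could not be stored anywhere and was dropped. Annotated restatement of the new logic; the comments are explanatory and not part of the patch, and flags is assumed to start as IORING_CQE_F_MORE, matching the final return:

	if (!error)
		error = mangle_poll(mask);	/* result must be final before posting */
	if (!__io_cqring_fill_event(req, error, flags) ||	/* CQE dropped on overflow? */
	    (req->poll.events & EPOLLONESHOT)) {		/* ...or one-shot anyway?   */
		io_poll_remove_waitqs(req);	/* unhook from the waitqueues */
		req->poll.done = true;
		flags = 0;			/* drop IORING_CQE_F_MORE: no further events */
	}

The net effect is that a multishot poll whose completions are overflowing the CQ ring is terminated instead of staying armed while userspace silently misses events.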
{
	bool do_complete = false;
+	if (!poll->head)
+		return false;
	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
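The new NULL check is small but is what makes this removal path safe to call unconditionally from the new io_poll_remove_waitqs() helper: a poll entry that was never armed (or has already been detached) has poll->head == NULL, and the spin_lock() that follows would otherwise dereference it. Annotated, with explanatory comments that are not part of the patch:

	if (!poll->head)		/* never armed or already torn down */
		return false;		/* nothing to remove, don't touch head->lock */
	spin_lock(&poll->head->lock);	/* safe: head is known non-NULL here */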