	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
-		wake_up_all(&ctx->cq_wait);
+		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
+		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->cq_wait))
-			wake_up_all(&ctx->cq_wait);
+			__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+				  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	}
	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
+		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}
/* Returns true if there are no backlogged entries after the flush */
	if (mask && !(mask & poll->events))
		return 0;

-	if (io_poll_get_ownership(req))
+	if (io_poll_get_ownership(req)) {
+		/*
+		 * If we trigger a multishot poll off our own wakeup path,
+		 * disable multishot as there is a circular dependency between
+		 * CQ posting and triggering the event.
+		 */
+		if (mask & EPOLL_URING_WAKE)
+			poll->events |= EPOLLONESHOT;
+
		__io_poll_execute(req, mask);
+	}
	return 1;
}
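For illustration only (not part of the patch, and assuming liburing 2.2 or newer for io_uring_prep_poll_multishot), here is a minimal userspace sketch of the circular dependency the EPOLL_URING_WAKE check breaks: the ring's registered eventfd is itself polled through the ring, so every posted CQE signals the eventfd, which completes the poll, which posts another CQE.

#include <liburing.h>
#include <sys/eventfd.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int efd, i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* eventfd signaled every time this ring posts a CQE */
	efd = eventfd(0, 0);
	if (efd < 0 || io_uring_register_eventfd(&ring, efd) < 0)
		return 1;

	/* multishot poll on that same eventfd, issued through the same ring */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, efd, POLLIN);
	io_uring_submit(&ring);

	/* any completion now feeds the cycle: CQE -> eventfd -> poll -> CQE */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	/*
	 * With the change above, the poll wakeup arrives with EPOLL_URING_WAKE
	 * set, the request is downgraded to oneshot (IORING_CQE_F_MORE clear
	 * on its CQE), and the chain stops rather than re-arming indefinitely.
	 */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("res=%d more=%d\n", cqe->res,
		       !!(cqe->flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}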