io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups
authorJens Axboe <axboe@kernel.dk>
Fri, 23 Dec 2022 14:04:49 +0000 (07:04 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 24 Jan 2023 06:22:43 +0000 (07:22 +0100)
[ Upstream commit 4464853277d0ccdb9914608dd1332f0fa2f9846f ]

Pass in EPOLL_URING_WAKE when signaling eventfd or doing poll related
wakeups, so that we can check for a circular event dependency between
eventfd and epoll. If this flag is set when our wakeup handlers are
called, then we know we have a dependency that needs to terminate
multishot requests.

eventfd and epoll are the only such possible dependencies.

Cc: stable@vger.kernel.org # 6.0
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
io_uring/io_uring.c

index 9a01188ff45a8c904ad33a4ac13faec5c6582a39..d855e668f37c5d09e58fbbe354a33aa8310f355f 100644 (file)
@@ -1629,13 +1629,15 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
         * wake as many waiters as we need to.
         */
        if (wq_has_sleeper(&ctx->cq_wait))
-               wake_up_all(&ctx->cq_wait);
+               __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+                               poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
        if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
-               eventfd_signal(ctx->cq_ev_fd, 1);
+               eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
        if (waitqueue_active(&ctx->poll_wait))
-               wake_up_interruptible(&ctx->poll_wait);
+               __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+                               poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1645,12 +1647,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                if (waitqueue_active(&ctx->cq_wait))
-                       wake_up_all(&ctx->cq_wait);
+                       __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+                                 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
        }
        if (io_should_trigger_evfd(ctx))
-               eventfd_signal(ctx->cq_ev_fd, 1);
+               eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
        if (waitqueue_active(&ctx->poll_wait))
-               wake_up_interruptible(&ctx->poll_wait);
+               __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+                               poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -5636,8 +5640,17 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & poll->events))
                return 0;
 
-       if (io_poll_get_ownership(req))
+       if (io_poll_get_ownership(req)) {
+               /*
+                * If we trigger a multishot poll off our own wakeup path,
+                * disable multishot as there is a circular dependency between
+                * CQ posting and triggering the event.
+                */
+               if (mask & EPOLL_URING_WAKE)
+                       poll->events |= EPOLLONESHOT;
+
                __io_poll_execute(req, mask);
+       }
        return 1;
 }