io_queue_linked_timeout(link);
}
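+/*
+ * Task-work callback: the request was flagged for reissue, clear the
+ * flag and punt it to io-wq for a blocking retry.
+ */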
+static void io_tw_requeue_iowq(struct io_kiocb *req, struct io_tw_state *ts)
+{
+	req->flags &= ~REQ_F_REISSUE;
+	io_queue_iowq(req);
+}
+
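+/*
+ * Flag the request for reissue and queue the actual io-wq punt from
+ * task_work. REQ_F_BL_NO_RECYCLE keeps a provided buffer from being
+ * recycled before the retry has run.
+ */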
+void io_tw_queue_iowq(struct io_kiocb *req)
+{
+	req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+	req->io_task_work.func = io_tw_requeue_iowq;
+	io_req_task_work_add(req);
+}
+
static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
while (!list_empty(&ctx->defer_list)) {
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
+void io_tw_queue_iowq(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
* current cycle.
*/
io_req_io_end(req);
-		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+		io_tw_queue_iowq(req);
return true;
}
req_set_fail(req);
io_req_end_write(req);
if (unlikely(res != req->cqe.res)) {
if (res == -EAGAIN && io_rw_should_reissue(req)) {
-			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
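+			/* queue a blocking io-wq reissue via task_work */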
+			io_tw_queue_iowq(req);
return;
}
req->cqe.res = res;
ret = io_iter_do_read(rw, &io->iter);
if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
-		req->flags &= ~REQ_F_REISSUE;
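+		/* reissue was already queued via task_work, skip completion */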
+		if (req->flags & REQ_F_REISSUE)
+			return IOU_ISSUE_SKIP_COMPLETE;
/* If we can poll, just do that. */
if (io_file_can_poll(req))
return -EAGAIN;
else
ret2 = -EINVAL;
-	if (req->flags & REQ_F_REISSUE) {
-		req->flags &= ~REQ_F_REISSUE;
-		ret2 = -EAGAIN;
-	}
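+	/* reissue was already queued via task_work, skip completion */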
+	if (req->flags & REQ_F_REISSUE)
+		return IOU_ISSUE_SKIP_COMPLETE;
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just