static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
+       __must_hold(&req->ctx->timeout_lock)
 {
        struct io_kiocb *req;
 
@@ ... @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
        return false;
 }
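The __must_hold() annotation added above makes the remaining contract explicit: plain io_match_task() may now only be called with timeout_lock already held. As a hedged illustration of such a caller (a condensed sketch from memory of this era of fs/io_uring.c, not lines from this patch), io_kill_timeouts() takes the lock around its whole timeout_list walk, so it keeps using the unlocked helper:

        /* sketch of an assumed existing caller; details outside this patch are assumptions */
        static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
                                     bool cancel_all)
        {
                struct io_kiocb *req, *tmp;
                int canceled = 0;

                spin_lock(&ctx->completion_lock);
                spin_lock_irq(&ctx->timeout_lock);      /* satisfies the __must_hold() above */
                list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                        if (io_match_task(req, tsk, cancel_all)) {
                                io_kill_timeout(req, -ECANCELED);
                                canceled++;
                        }
                }
                spin_unlock_irq(&ctx->timeout_lock);
                spin_unlock(&ctx->completion_lock);
                return canceled != 0;
        }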
 
+static bool io_match_linked(struct io_kiocb *head)
+{
+       struct io_kiocb *req;
+
+       io_for_each_link(req, head) {
+               if (req->flags & REQ_F_INFLIGHT)
+                       return true;
+       }
+       return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * The caller must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+                              bool cancel_all)
+{
+       bool matched;
+
+       if (task && head->task != task)
+               return false;
+       if (cancel_all)
+               return true;
+
+       if (head->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = head->ctx;
+
+               /* protect against races with linked timeouts */
+               spin_lock_irq(&ctx->timeout_lock);
+               matched = io_match_linked(head);
+               spin_unlock_irq(&ctx->timeout_lock);
+       } else {
+               matched = io_match_linked(head);
+       }
+       return matched;
+}
+
 static inline void req_set_fail(struct io_kiocb *req)
 {
        req->flags |= REQ_F_FAIL;
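The comment above io_match_task_safe() states the contract; the reason the link walk needs timeout_lock at all follows from io_for_each_link(), which io_match_linked() uses. The macro is defined elsewhere in fs/io_uring.c; the lines below are an assumed reproduction for context, not part of the patch. It simply chases ->link pointers, and that chain can change under a racing linked timeout:

        /* assumed definition, repeated here only for context */
        #define io_for_each_link(pos, head) \
                for (pos = (head); pos; pos = pos->link)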
@@ ... @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
        int posted = 0, i;
 
        spin_lock(&ctx->completion_lock);
-       spin_lock_irq(&ctx->timeout_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list;
 
                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-                       if (io_match_task(req, tsk, cancel_all))
+                       if (io_match_task_safe(req, tsk, cancel_all))
                                posted += io_poll_remove_one(req);
                }
        }
-       spin_unlock_irq(&ctx->timeout_lock);
        spin_unlock(&ctx->completion_lock);
 
        if (posted)
@@ ... @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_task_cancel *cancel = data;
-       bool ret;
 
-       if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
-               struct io_ring_ctx *ctx = req->ctx;
-
-               /* protect against races with linked timeouts */
-               spin_lock_irq(&ctx->timeout_lock);
-               ret = io_match_task(req, cancel->task, cancel->all);
-               spin_unlock_irq(&ctx->timeout_lock);
-       } else {
-               ret = io_match_task(req, cancel->task, cancel->all);
-       }
-       return ret;
+       return io_match_task_safe(req, cancel->task, cancel->all);
 }
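The io_cancel_task_cb() hunk above drops the open-coded conditional locking now that io_match_task_safe() encapsulates it. For context, the callback is handed to io-wq roughly as sketched below; this condenses the call site (io_uring_try_cancel_requests() in the same file) from memory, so the surrounding names are assumptions rather than lines from this patch:

        /* hypothetical condensed call-site sketch, not part of this patch */
        struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
        enum io_wq_cancel cret;

        /* io-wq invokes io_cancel_task_cb() for each queued work item of this task */
        cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, &cancel, true);
        ret |= (cret != IO_WQ_CANCEL_NOTFOUND);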
 
 static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ ... @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
        LIST_HEAD(list);
 
        spin_lock(&ctx->completion_lock);
-       spin_lock_irq(&ctx->timeout_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-               if (io_match_task(de->req, task, cancel_all)) {
+               if (io_match_task_safe(de->req, task, cancel_all)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
        }
-       spin_unlock_irq(&ctx->timeout_lock);
        spin_unlock(&ctx->completion_lock);
        if (list_empty(&list))
                return false;