        if (req->flags & REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;
 
-               spin_lock(&ctx->completion_lock);
+               spin_lock_irq(&ctx->timeout_lock);
                io_for_each_link(cur, req)
                        io_prep_async_work(cur);
-               spin_unlock(&ctx->completion_lock);
+               spin_unlock_irq(&ctx->timeout_lock);
        } else {
                io_for_each_link(cur, req)
                        io_prep_async_work(cur);
@@ ... @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
        int posted = 0, i;
 
        spin_lock(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list;
 
@@ ... @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                                posted += io_poll_remove_one(req);
                }
        }
+       spin_unlock_irq(&ctx->timeout_lock);
        spin_unlock(&ctx->completion_lock);
 
        if (posted)
@@ ... @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
                struct io_ring_ctx *ctx = req->ctx;
 
                /* protect against races with linked timeouts */
-               spin_lock(&ctx->completion_lock);
+               spin_lock_irq(&ctx->timeout_lock);
                ret = io_match_task(req, cancel->task, cancel->all);
-               spin_unlock(&ctx->completion_lock);
+               spin_unlock_irq(&ctx->timeout_lock);
        } else {
                ret = io_match_task(req, cancel->task, cancel->all);
        }
@@ ... @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
        LIST_HEAD(list);
 
        spin_lock(&ctx->completion_lock);
+       spin_lock_irq(&ctx->timeout_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
                if (io_match_task(de->req, task, cancel_all)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
        }
+       spin_unlock_irq(&ctx->timeout_lock);
        spin_unlock(&ctx->completion_lock);
        if (list_empty(&list))
                return false;
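
Taken together, the hunks pin down one nesting rule: ->timeout_lock nests inside ->completion_lock and is acquired with IRQs disabled, because the linked-timeout hrtimer callback (io_link_timeout_fn) takes ->timeout_lock from IRQ context. Below is a minimal sketch of that ordering, assuming kernel context; the example_ctx and example_cancel names are illustrative only, not kernel API:

#include <linux/spinlock.h>

struct example_ctx {
        spinlock_t completion_lock;     /* taken from process context only */
        spinlock_t timeout_lock;        /* also taken from hrtimer (hard-IRQ) context */
};

static void example_cancel(struct example_ctx *ctx)
{
        /* Outer lock first; no IRQ disabling needed for completion_lock. */
        spin_lock(&ctx->completion_lock);
        /*
         * Inner lock with IRQs off: if a linked timeout expired right now on
         * this CPU, its handler would spin on timeout_lock and deadlock.
         */
        spin_lock_irq(&ctx->timeout_lock);

        /* ... walk the request links / cancel hash, as in the hunks above ... */

        spin_unlock_irq(&ctx->timeout_lock);    /* re-enables IRQs */
        spin_unlock(&ctx->completion_lock);
}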