        DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
        u8 sqe_flags_allowed;
        u8 sqe_flags_required;
+       bool registered;
 };
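
For reference, the full struct after this change would look roughly as
follows; a sketch only, since the first bitmap is cut off by the hunk
above and its layout is assumed from the surrounding context:

        struct io_restriction {
                DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
                DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
                u8 sqe_flags_allowed;
                u8 sqe_flags_required;
                bool registered;
        };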
 
 struct io_ring_ctx {
 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
 {
        if (ctx->sqo_thread) {
+               /*
+                * We may arrive here from the error branch in
+                * io_sq_offload_create() where the kthread is created
+                * without being woken up, so wake it up now to make
+                * sure the wait will complete.
+                */
+               wake_up_process(ctx->sqo_thread);
+
                wait_for_completion(&ctx->sq_thread_comp);
                /*
                 * The park is a bit of a work-around, without it we get
        tsk->io_uring = NULL;
 }
 
-static int io_sq_offload_start(struct io_ring_ctx *ctx,
-                              struct io_uring_params *p)
+static int io_sq_offload_create(struct io_ring_ctx *ctx,
+                               struct io_uring_params *p)
 {
        int ret;
 
                ret = io_uring_alloc_task_context(ctx->sqo_thread);
                if (ret)
                        goto err;
-               wake_up_process(ctx->sqo_thread);
        } else if (p->flags & IORING_SETUP_SQ_AFF) {
                /* Can't have SQ_AFF without SQPOLL */
                ret = -EINVAL;
        return ret;
 }
 
+static void io_sq_offload_start(struct io_ring_ctx *ctx)
+{
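+       /*
+        * The SQPOLL kthread is created by io_sq_offload_create() but left
+        * sleeping; it only starts processing the SQ ring once woken here.
+        */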
+       if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sqo_thread)
+               wake_up_process(ctx->sqo_thread);
+}
+
 static inline void __io_unaccount_mem(struct user_struct *user,
                                      unsigned long nr_pages)
 {
        if (!percpu_ref_tryget(&ctx->refs))
                goto out_fput;
 
+       ret = -EBADFD;
+       if (ctx->flags & IORING_SETUP_R_DISABLED)
+               goto out;
+
        /*
         * For SQ polling, the thread will do all submissions and completions.
         * Just return the requested submit count, and wake the thread if
        if (ret)
                goto err;
 
-       ret = io_sq_offload_start(ctx, p);
+       ret = io_sq_offload_create(ctx, p);
        if (ret)
                goto err;
 
+       if (!(p->flags & IORING_SETUP_R_DISABLED))
+               io_sq_offload_start(ctx);
+
        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_rings, sq.head);
        p->sq_off.tail = offsetof(struct io_rings, sq.tail);
 
        if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
                        IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
-                       IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
+                       IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
+                       IORING_SETUP_R_DISABLED))
                return -EINVAL;
 
        return  io_uring_create(entries, &p, params);
        size_t size;
        int i, ret;
 
+       /* Restrictions allowed only if rings started disabled */
+       if (!(ctx->flags & IORING_SETUP_R_DISABLED))
+               return -EBADFD;
+
        /* We allow only a single restrictions registration */
-       if (ctx->restricted)
+       if (ctx->restrictions.registered)
                return -EBUSY;
 
        if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
        if (ret != 0)
                memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
        else
-               ctx->restricted = 1;
+               ctx->restrictions.registered = true;
 
        kfree(res);
        return ret;
 }
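
As a usage sketch (assuming the uapi struct io_uring_restriction, the
IORING_RESTRICTION_* opcodes and the IORING_REGISTER_RESTRICTIONS code
introduced by this series, plus <sys/syscall.h> for the raw syscall), a
caller fills an array of restriction entries and registers them once,
before enabling the rings:

        struct io_uring_restriction res[2];
        int ret;

        memset(res, 0, sizeof(res));

        /* allow registering fixed buffers */
        res[0].opcode = IORING_RESTRICTION_REGISTER_OP;
        res[0].register_op = IORING_REGISTER_BUFFERS;

        /* allow only readv requests to be submitted */
        res[1].opcode = IORING_RESTRICTION_SQE_OP;
        res[1].sqe_op = IORING_OP_READV;

        ret = syscall(__NR_io_uring_register, ring_fd,
                      IORING_REGISTER_RESTRICTIONS, res, 2);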
 
+static int io_register_enable_rings(struct io_ring_ctx *ctx)
+{
+       if (!(ctx->flags & IORING_SETUP_R_DISABLED))
+               return -EBADFD;
+
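+       /*
+        * Restrictions may have been registered while the rings were
+        * disabled; start enforcing them from this point on.
+        */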
+       if (ctx->restrictions.registered)
+               ctx->restricted = 1;
+
+       ctx->flags &= ~IORING_SETUP_R_DISABLED;
+
+       io_sq_offload_start(ctx);
+
+       return 0;
+}
+
 static bool io_register_op_must_quiesce(int op)
 {
        switch (op) {
                        break;
                ret = io_unregister_personality(ctx, nr_args);
                break;
+       case IORING_REGISTER_ENABLE_RINGS:
+               ret = -EINVAL;
+               if (arg || nr_args)
+                       break;
+               ret = io_register_enable_rings(ctx);
+               break;
        case IORING_REGISTER_RESTRICTIONS:
                ret = io_register_restrictions(ctx, arg, nr_args);
                break;
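
Putting it together, the intended flow is: create the rings disabled,
register restrictions, then enable. A minimal sketch, with error handling
omitted and raw syscall wrappers assumed:

        struct io_uring_params p;
        int ring_fd;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_R_DISABLED;

        ring_fd = syscall(__NR_io_uring_setup, 64, &p);

        /* io_uring_enter() fails with -EBADFD until the rings are enabled */

        /* ... register restrictions as in the sketch above ... */

        syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_ENABLE_RINGS, NULL, 0);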