{
        struct io_wqe *wqe = worker->wqe;
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
-       unsigned nr_workers;
 
        /*
         * If we're not at zero, someone else is holding a brief reference
         * to us. Wait for that to go away.
         */
                raw_spin_lock_irq(&wqe->lock);
        }
        acct->nr_workers--;
-       nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
-                       wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
        raw_spin_unlock_irq(&wqe->lock);
 
-       /* all workers gone, wq exit can proceed */
-       if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
-               complete(&wqe->wq->done);
-
        kfree_rcu(worker, rcu);
+       if (refcount_dec_and_test(&wqe->wq->refs))
+               complete(&wqe->wq->done);
 }
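The ordering in this hunk is the heart of the fix: the worker frees itself (via kfree_rcu()) and only then drops its reference on the wq, so the final refcount_dec_and_test()/complete() is the last thing the exiting worker does, and the per-worker reference keeps wqe->wq alive until that point. The removed lines show the old scheme, which recomputed nr_workers under the lock and completed wq->done from whichever exit path saw zero. A minimal userspace sketch of the new lifetime pattern, with C11 atomics and a condition variable standing in for refcount_t and struct completion (all names in the sketch are hypothetical, not io-wq's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct wq {
        atomic_int refs;                /* stands in for refcount_t wq->refs */
        pthread_mutex_t lock;
        pthread_cond_t done_cv;         /* stands in for struct completion wq->done */
        int done;
};

static void wq_put(struct wq *wq)
{
        /* stands in for refcount_dec_and_test() + complete() */
        if (atomic_fetch_sub(&wq->refs, 1) == 1) {
                pthread_mutex_lock(&wq->lock);
                wq->done = 1;
                pthread_cond_signal(&wq->done_cv);
                pthread_mutex_unlock(&wq->lock);
        }
}

static void *worker_fn(void *arg)
{
        struct wq *wq = arg;

        /* ... run work, unlink and free all per-worker state first ... */
        /*
         * The ref drop comes strictly last; past this line the worker
         * must not touch *wq, which is the rule io_worker_exit() now
         * follows by doing kfree_rcu(worker) before the final drop.
         */
        wq_put(wq);
        return NULL;
}

int main(void)
{
        struct wq wq = { .lock = PTHREAD_MUTEX_INITIALIZER,
                         .done_cv = PTHREAD_COND_INITIALIZER };
        pthread_t t[4];

        atomic_init(&wq.refs, 1);       /* the manager's base reference */
        for (int i = 0; i < 4; i++) {
                atomic_fetch_add(&wq.refs, 1);  /* create_io_worker()'s refcount_inc() */
                pthread_create(&t[i], NULL, worker_fn, &wq);
        }

        wq_put(&wq);                    /* owner drops the base reference */

        pthread_mutex_lock(&wq.lock);   /* stands in for wait_for_completion() */
        while (!wq.done)
                pthread_cond_wait(&wq.done_cv, &wq.lock);
        pthread_mutex_unlock(&wq.lock);

        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        puts("all workers gone, wq exit can proceed");
        return 0;
}

The manager's base reference (seeded to 1 further down) is what guarantees the count cannot reach zero while workers are still being created.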
 
 static inline bool io_wqe_run_queue(struct io_wqe *wqe)
 
 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 {
-       struct io_wqe_acct *acct =&wqe->acct[index];
+       struct io_wqe_acct *acct = &wqe->acct[index];
        struct io_worker *worker;
 
        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
        if (index == IO_WQ_ACCT_UNBOUND)
                atomic_inc(&wq->user->processes);
 
+       refcount_inc(&wq->refs);
        wake_up_process(worker->task);
        return true;
 }
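Worth noting: the new refcount_inc() sits before wake_up_process(), so the reference exists before the worker task can possibly run, exit, and drop it in io_worker_exit(). A tiny sketch of why that ordering matters, again with a plain atomic standing in for refcount_t (hypothetical names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refs;         /* stands in for wq->refs */

static void *worker_fn(void *arg)
{
        /* a worker's exit path drops the reference it was handed */
        atomic_fetch_sub(&refs, 1);
        return NULL;
}

static int create_worker(pthread_t *t)
{
        /*
         * Take the reference before the new task can run, mirroring
         * refcount_inc(&wq->refs) before wake_up_process().  The other
         * order would let a fast worker exit and drop a reference that
         * was never taken.
         */
        atomic_fetch_add(&refs, 1);
        if (pthread_create(t, NULL, worker_fn, NULL) != 0) {
                atomic_fetch_sub(&refs, 1);     /* spawn failed: undo */
                return -1;
        }
        return 0;
}

int main(void)
{
        pthread_t t;

        atomic_init(&refs, 1);          /* owner's base reference */
        if (create_worker(&t) == 0)
                pthread_join(t, NULL);
        printf("refs after worker exit: %d\n", atomic_load(&refs)); /* 1 */
        return 0;
}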
        return acct->nr_workers < acct->max_workers;
 }
 
+static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+{
+       send_sig(SIGINT, worker->task, 1);
+       return false;
+}
+
+/*
+ * Iterate the passed in list and call the specific function for each
+ * worker that isn't exiting
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+                                 bool (*func)(struct io_worker *, void *),
+                                 void *data)
+{
+       struct io_worker *worker;
+       bool ret = false;
+
+       list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+               if (io_worker_get(worker)) {
+                       /* no task if node is/was offline */
+                       if (worker->task)
+                               ret = func(worker, data);
+                       io_worker_release(worker);
+                       if (ret)
+                               break;
+               }
+       }
+
+       return ret;
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+       wake_up_process(worker->task);
+       return false;
+}
+
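io_wqe_worker_send_sig(), io_wq_for_each_worker() and io_wq_worker_wake() move up unchanged so the manager's new exit path can reach them; the matching removals appear further down. The walk itself follows a standard pattern: pin each worker with io_worker_get() so it cannot be freed mid-callback, skip workers whose count already hit zero, and stop early when the callback returns true. A userspace approximation of the pinning step, with a CAS loop standing in for refcount_inc_not_zero() (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct worker {
        atomic_int ref;         /* 0 means the worker is already exiting */
        int id;
};

/*
 * Models io_worker_get(): only succeed while the count is nonzero, so
 * callers never resurrect a worker that has begun tearing down.
 */
static bool worker_tryget(struct worker *w)
{
        int old = atomic_load(&w->ref);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&w->ref, &old, old + 1))
                        return true;
        }
        return false;
}

static void worker_put(struct worker *w)
{
        atomic_fetch_sub(&w->ref, 1);   /* drop the temporary reference */
}

static bool for_each_worker(struct worker *workers, int n,
                            bool (*func)(struct worker *, void *), void *data)
{
        bool ret = false;

        for (int i = 0; i < n; i++) {
                if (worker_tryget(&workers[i])) {
                        ret = func(&workers[i], data);
                        worker_put(&workers[i]);
                        if (ret)
                                break;  /* callback asked to stop */
                }
        }
        return ret;
}

static bool print_worker(struct worker *w, void *data)
{
        printf("visiting worker %d\n", w->id);
        return false;   /* false: keep iterating, like io_wq_worker_wake() */
}

int main(void)
{
        struct worker workers[3];

        for (int i = 0; i < 3; i++) {
                atomic_init(&workers[i].ref, 1);
                workers[i].id = i;
        }
        atomic_store(&workers[1].ref, 0);       /* worker 1 is exiting: skipped */

        for_each_worker(workers, 3, print_worker, NULL);
        return 0;
}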
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
 static int io_wq_manager(void *data)
 {
        struct io_wq *wq = data;
-       int workers_to_create = num_possible_nodes();
        int node;
 
        /* create fixed workers */
-       refcount_set(&wq->refs, workers_to_create);
+       refcount_set(&wq->refs, 1);
        for_each_node(node) {
                if (!node_online(node))
                        continue;
-               if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
-                       goto err;
-               workers_to_create--;
+               if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+                       continue;
+               set_bit(IO_WQ_BIT_ERROR, &wq->state);
+               set_bit(IO_WQ_BIT_EXIT, &wq->state);
+               goto out;
        }
 
-       while (workers_to_create--)
-               refcount_dec(&wq->refs);
-
        complete(&wq->done);
 
        while (!kthread_should_stop()) {
        if (current->task_works)
                task_work_run();
 
-       return 0;
-err:
-       set_bit(IO_WQ_BIT_ERROR, &wq->state);
-       set_bit(IO_WQ_BIT_EXIT, &wq->state);
-       if (refcount_sub_and_test(workers_to_create, &wq->refs))
+out:
+       if (refcount_dec_and_test(&wq->refs)) {
                complete(&wq->done);
+               return 0;
+       }
+       /* if ERROR is set and we get here, we have workers to wake */
+       if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
+               rcu_read_lock();
+               for_each_node(node)
+                       io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
+               rcu_read_unlock();
+       }
        return 0;
 }
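The reworked exit path leans on the reference scheme above: refs starts at 1 for the manager, each created worker adds one, and whoever drops the count to zero completes wq->done. If the manager's decrement at out: is not the last, some worker still holds a reference; on the error path those workers may be parked in schedule() without having seen IO_WQ_BIT_EXIT, so they are woken explicitly to observe the bit, exit, and release their references. A compact sketch of that set-bit-then-wake handshake (hypothetical names; a condition variable stands in for the scheduler wakeup):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int exiting;     /* stands in for IO_WQ_BIT_EXIT in wq->state */

/*
 * A worker parked the way schedule() parks an io worker: it exits only
 * once it is woken *and* sees the exit bit, which is why the error path
 * must wake every worker after setting the bit.
 */
static void *worker_fn(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!exiting)
                pthread_cond_wait(&wake, &lock);
        pthread_mutex_unlock(&lock);
        puts("worker saw the exit bit; it can now drop its wq ref");
        return NULL;
}

int main(void)
{
        pthread_t t[2];

        for (int i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, worker_fn, NULL);

        /* manager error path: set the bit first, then wake everyone */
        pthread_mutex_lock(&lock);
        exiting = 1;
        pthread_cond_broadcast(&wake);  /* the io_wq_worker_wake() walk */
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}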
 
        work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
-static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
-{
-       send_sig(SIGINT, worker->task, 1);
-       return false;
-}
-
-/*
- * Iterate the passed in list and call the specific function for each
- * worker that isn't exiting
- */
-static bool io_wq_for_each_worker(struct io_wqe *wqe,
-                                 bool (*func)(struct io_worker *, void *),
-                                 void *data)
-{
-       struct io_worker *worker;
-       bool ret = false;
-
-       list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
-               if (io_worker_get(worker)) {
-                       /* no task if node is/was offline */
-                       if (worker->task)
-                               ret = func(worker, data);
-                       io_worker_release(worker);
-                       if (ret)
-                               break;
-               }
-       }
-
-       return ret;
-}
-
 void io_wq_cancel_all(struct io_wq *wq)
 {
        int node;
        return refcount_inc_not_zero(&wq->use_refs);
 }
 
-static bool io_wq_worker_wake(struct io_worker *worker, void *data)
-{
-       wake_up_process(worker->task);
-       return false;
-}
-
 static void __io_wq_destroy(struct io_wq *wq)
 {
        int node;