return 0;
 }
 
+/*
+ * The allocator thread only has work to do while its device is RW and
+ * the filesystem-wide allocator flag is set; otherwise it parks itself:
+ */
+static inline bool allocator_thread_running(struct bch_dev *ca)
+{
+       return ca->mi.state == BCH_MEMBER_STATE_RW &&
+               test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags);
+}
+
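
For reference, a condensed sketch of the allocator_state values this patch toggles. The names come from the diff itself; the enum's shape is an assumption, and the real definition elsewhere in the bcachefs tree may carry additional states:

enum allocator_states {
	ALLOCATOR_STOPPED,	/* parked: device RO or fs-wide flag clear */
	ALLOCATOR_RUNNING,	/* moving buckets from free_inc to reserves */
};
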
 /**
  * bch2_allocator_thread - move buckets from free_inc to reserves
  *
        int ret;
 
        set_freezable();
-       ca->allocator_state = ALLOCATOR_RUNNING;
 
        while (1) {
+               /* Park until both the device and the fs say to run: */
+               if (!allocator_thread_running(ca)) {
+                       ca->allocator_state = ALLOCATOR_STOPPED;
+                       if (kthread_wait_freezable(allocator_thread_running(ca)))
+                               break;
+               }
+
+               ca->allocator_state = ALLOCATOR_RUNNING;
+
                cond_resched();
                if (kthread_should_stop())
                        break;
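
kthread_wait_freezable() is what actually parks the thread. One plausible shape for it, assuming the standard freezable-kthread wait loop (the real helper is defined elsewhere in the tree); a nonzero return means the thread was asked to stop, which is why the caller breaks out of its main loop:

#define kthread_wait_freezable(cond)				\
({								\
	int _ret = 0;						\
	while (1) {						\
		set_current_state(TASK_INTERRUPTIBLE);		\
		if (kthread_should_stop()) {			\
			_ret = 1;	/* tell caller to exit */ \
			break;					\
		}						\
		if (cond)					\
			break;					\
		schedule();					\
		try_to_freeze();				\
	}							\
	__set_current_state(TASK_RUNNING);			\
	_ret;							\
})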
 
 
        set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
 
+       /* Wake allocator threads parked while the fs was read-only: */
+       for_each_rw_member(ca, c, i)
+               bch2_wake_allocator(ca);
+
        ret = bch2_journal_reclaim_start(&c->journal);
        if (ret) {
                bch_err(c, "error starting journal reclaim: %i", ret);
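
Note the ordering in the hunk above: BCH_FS_ALLOCATOR_RUNNING is set before the wakeups, so a thread parked in kthread_wait_freezable() re-evaluates allocator_thread_running() and sees the flag. A minimal sketch of bch2_wake_allocator(), assuming the thread pointer is published RCU-style in ca->alloc_thread:

static inline void bch2_wake_allocator(struct bch_dev *ca)
{
	struct task_struct *p;

	rcu_read_lock();
	p = rcu_dereference(ca->alloc_thread);
	if (p)
		wake_up_process(p);	/* no-op if already awake */
	rcu_read_unlock();
}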
 
 static void bch2_dev_free(struct bch_dev *ca)
 {
+       bch2_dev_allocator_stop(ca);
+
        cancel_work_sync(&ca->io_error_work);
 
        if (ca->kobj.state_in_sysfs &&
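
bch2_dev_free() now shuts the allocator down before any teardown, so the thread can't touch a dying device. A hypothetical sketch of bch2_dev_allocator_stop() under the same RCU-pointer assumption; it must be safe to call even when no thread was ever started:

void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p =
		rcu_dereference_protected(ca->alloc_thread, 1);

	rcu_assign_pointer(ca->alloc_thread, NULL);

	if (p)
		kthread_stop(p);	/* blocks until the thread exits */
}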
        if (!ca)
                goto err;
 
+       if (ca->mi.state == BCH_MEMBER_STATE_RW &&
+           bch2_dev_allocator_start(ca)) {
+               bch2_dev_free(ca);
+               goto err;
+       }
+
        bch2_dev_attach(c, ca, dev_idx);
 out:
        pr_verbose_init(c->opts, "ret %i", ret);
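
The counterpart that this attach path calls for RW members might look like the sketch below (same assumptions as above; the thread-function and thread-name arguments are illustrative). It returns nonzero on failure, which is what lets the caller bail out through bch2_dev_free():

int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	if (ca->alloc_thread)	/* already started */
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch-alloc/%s", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}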