c->bucket_size_max = bucket_size_max;
 
-       if (c->capacity) {
-               bch2_io_timer_add(&c->io_clock[READ],
-                                &c->bucket_clock[READ].rescale);
-               bch2_io_timer_add(&c->io_clock[WRITE],
-                                &c->bucket_clock[WRITE].rescale);
-       } else {
-               bch2_io_timer_del(&c->io_clock[READ],
-                                &c->bucket_clock[READ].rescale);
-               bch2_io_timer_del(&c->io_clock[WRITE],
-                                &c->bucket_clock[WRITE].rescale);
-       }
-
        /* Wake up in case someone was waiting for buckets */
        closure_wake_up(&c->freelist_wait);
 }
 
        bch2_copygc_stop(c);
        bch2_gc_thread_stop(c);
 
+       /* The bucket clock rescale timers only run while read-write: */
+       bch2_io_timer_del(&c->io_clock[READ], &c->bucket_clock[READ].rescale);
+       bch2_io_timer_del(&c->io_clock[WRITE], &c->bucket_clock[WRITE].rescale);
+
        /*
         * Flush journal before stopping allocators, because flushing journal
         * blacklist entries involves allocating new btree nodes:
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
+       /* Re-add the bucket clock rescale timers now that capacity is known: */
+       bch2_io_timer_add(&c->io_clock[READ], &c->bucket_clock[READ].rescale);
+       bch2_io_timer_add(&c->io_clock[WRITE], &c->bucket_clock[WRITE].rescale);
+
        for_each_rw_member(ca, c, i) {
                ret = bch2_dev_allocator_start(ca);
                if (ret) {