 #include "journal_io.h"
 #include "quota.h"
 #include "recovery.h"
+#include "replicas.h"
 #include "super-io.h"
 
 #include <linux/stat.h>
 
 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
 
-struct bkey_i *btree_root_find(struct bch_fs *c,
-                              struct bch_sb_field_clean *clean,
-                              struct jset *j,
-                              enum btree_id id, unsigned *level)
+static struct bkey_i *btree_root_find(struct bch_fs *c,
+                                     struct bch_sb_field_clean *clean,
+                                     struct jset *j,
+                                     enum btree_id id, unsigned *level)
 {
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;
        return k;
 }
 
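+/*
+ * Apply a single journal entry (from the journal itself or from the
+ * superblock's clean section) early in recovery: btree root entries fill
+ * in c->btree_roots, and usage entries restore replica sector counts,
+ * the inode count and the key version counter.
+ */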
+static int journal_replay_entry_early(struct bch_fs *c,
+                                     struct jset_entry *entry)
+{
+       int ret = 0;
+
+       switch (entry->type) {
+       case BCH_JSET_ENTRY_btree_root: {
+               struct btree_root *r = &c->btree_roots[entry->btree_id];
+
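+               /*
+                * An empty entry records a root with no usable key;
+                * r->error makes recovery treat it as an invalid root.
+                */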
+               if (entry->u64s) {
+                       r->level = entry->level;
+                       bkey_copy(&r->key, &entry->start[0]);
+                       r->error = 0;
+               } else {
+                       r->error = -EIO;
+               }
+               r->alive = true;
+               break;
+       }
+       case BCH_JSET_ENTRY_usage: {
+               struct jset_entry_usage *u =
+                       container_of(entry, struct jset_entry_usage, entry);
+
+               switch (u->type) {
+               case FS_USAGE_REPLICAS:
+                       ret = bch2_replicas_set_usage(c, &u->r,
+                                               le64_to_cpu(u->sectors));
+                       break;
+               case FS_USAGE_INODES:
+                       percpu_u64_set(&c->usage[0]->s.nr_inodes,
+                                               le64_to_cpu(u->sectors));
+                       break;
+               case FS_USAGE_KEY_VERSION:
+                       atomic64_set(&c->key_version,
+                                    le64_to_cpu(u->sectors));
+                       break;
+               }
+
+               break;
+       }
+       }
+
+       return ret;
+}
+
 static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean *clean,
                                   struct jset *j)
 {
@@ ... @@ int bch2_fs_recovery(struct bch_fs *c)
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL, *sb_clean = NULL;
+       struct jset_entry *entry;
        LIST_HEAD(journal);
        struct jset *j = NULL;
        unsigned i;
@@ ... @@ int bch2_fs_recovery(struct bch_fs *c)
        fsck_err_on(clean && !journal_empty(&journal), c,
                    "filesystem marked clean but journal not empty");
 
+       err = "insufficient memory";
        if (clean) {
                c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);
+
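+               /* Clean shutdown: these entries were saved in the superblock's clean section */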
+               for (entry = clean->start;
+                    entry != vstruct_end(&clean->field);
+                    entry = vstruct_next(entry)) {
+                       ret = journal_replay_entry_early(c, entry);
+                       if (ret)
+                               goto err;
+               }
        } else {
+               struct journal_replay *i;
+
                c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock);
+
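+               /* Unclean shutdown: walk every jset we read from the journal */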
+               list_for_each_entry(i, &journal, list)
+                       vstruct_for_each(&i->j, entry) {
+                               ret = journal_replay_entry_early(c, entry);
+                               if (ret)
+                                       goto err;
+                       }
        }
 
        for (i = 0; i < BTREE_ID_NR; i++) {
-               unsigned level;
-               struct bkey_i *k;
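+               /* Roots were filled in by journal_replay_entry_early() */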
+               struct btree_root *r = &c->btree_roots[i];
 
-               k = btree_root_find(c, clean, j, i, &level);
-               if (!k)
+               if (!r->alive)
                        continue;
 
                err = "invalid btree root pointer";
-               if (IS_ERR(k))
+               if (r->error)
                        goto err;
 
                err = "error reading btree root";
-               if (bch2_btree_root_read(c, i, k, level)) {
+               if (bch2_btree_root_read(c, i, &r->key, r->level)) {
                        if (i != BTREE_ID_ALLOC)
                                goto err;
 
 
@@ ... @@ int bch2_fs_recovery(struct bch_fs *c)
        bch_verbose(c, "starting mark and sweep:");
        err = "error in recovery";
-       ret = bch2_initial_gc(c, &journal);
+       ret = bch2_gc(c, &journal, true);
        if (ret)
                goto err;
        bch_verbose(c, "mark and sweep done");
 
        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
 
+       /*
+        * Skip past versions that might have possibly been used (as nonces),
+        * but hadn't had their pointers written:
+        */
+       if (c->sb.encryption_type && !c->sb.clean)
+               atomic64_add(1 << 16, &c->key_version);
+
        if (c->opts.noreplay)
                goto out;
 
@@ ... @@ int bch2_fs_initialize(struct bch_fs *c)
        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);
 
-       ret = bch2_initial_gc(c, &journal);
+       ret = bch2_gc(c, &journal, true);
        if (ret)
                goto err;