bcachefs: journal_replay_early()
author: Kent Overstreet <kent.overstreet@gmail.com>
Fri, 25 Jan 2019 00:09:49 +0000 (19:09 -0500)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:15 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_gc.h
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/recovery.c
fs/bcachefs/replicas.c
fs/bcachefs/replicas.h

index b63dcbdb95c01760381f1536e26083d20bea9c31..ac3fa1efb6499834c56b1ee642531074f6702d60 100644 (file)
@@ -1249,19 +1249,3 @@ int bch2_gc_thread_start(struct bch_fs *c)
        wake_up_process(p);
        return 0;
 }
-
-/* Initial GC computes bucket marks during startup */
-
-int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
-{
-       int ret = bch2_gc(c, journal, true);
-
-       /*
-        * Skip past versions that might have possibly been used (as nonces),
-        * but hadn't had their pointers written:
-        */
-       if (c->sb.encryption_type)
-               atomic64_add(1 << 16, &c->key_version);
-
-       return ret;
-}
index 89ee72ac49f6fbf1f97823dc1213ea00321ea9c1..9eb2b0527a9208233d55560b281ee49de8863c09 100644 (file)
@@ -8,7 +8,6 @@ void bch2_coalesce(struct bch_fs *);
 int bch2_gc(struct bch_fs *, struct list_head *, bool);
 void bch2_gc_thread_stop(struct bch_fs *);
 int bch2_gc_thread_start(struct bch_fs *);
-int bch2_initial_gc(struct bch_fs *, struct list_head *);
 void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);
 
 /*
index ce5127301cb23e35fa149167a5aa63208e4d52f3..b5a4853451a71b4674f6c4754e55586e53fff6d1 100644 (file)
@@ -475,6 +475,7 @@ struct btree_root {
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
+       s8                      error;
 };
 
 /*
index 4bc7be9b5298113cd476093ca7828f49fa113b55..451b293c44a6d4bea52c4c630fa746d4f6741eae 100644 (file)
@@ -2122,7 +2122,6 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
        BUG_ON(btree_node_root(c, b));
 
        __bch2_btree_set_root_inmem(c, b);
-       bch2_btree_set_root_ondisk(c, b, READ);
 }
 
 void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
index cb9601dfcd3721c7d2ae53819e631981caeb8c2c..6349c394be45b641f34860da23be1a2a517ff924 100644 (file)
 #include "journal_io.h"
 #include "quota.h"
 #include "recovery.h"
+#include "replicas.h"
 #include "super-io.h"
 
 #include <linux/stat.h>
 
 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
 
-struct bkey_i *btree_root_find(struct bch_fs *c,
-                              struct bch_sb_field_clean *clean,
-                              struct jset *j,
-                              enum btree_id id, unsigned *level)
+static struct bkey_i *btree_root_find(struct bch_fs *c,
+                                     struct bch_sb_field_clean *clean,
+                                     struct jset *j,
+                                     enum btree_id id, unsigned *level)
 {
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;
@@ -50,6 +51,51 @@ found:
        return k;
 }
 
+static int journal_replay_entry_early(struct bch_fs *c,
+                                     struct jset_entry *entry)
+{
+       int ret = 0;
+
+       switch (entry->type) {
+       case BCH_JSET_ENTRY_btree_root: {
+               struct btree_root *r = &c->btree_roots[entry->btree_id];
+
+               if (entry->u64s) {
+                       r->level = entry->level;
+                       bkey_copy(&r->key, &entry->start[0]);
+                       r->error = 0;
+               } else {
+                       r->error = -EIO;
+               }
+               r->alive = true;
+               break;
+       }
+       case BCH_JSET_ENTRY_usage: {
+               struct jset_entry_usage *u =
+                       container_of(entry, struct jset_entry_usage, entry);
+
+               switch (u->type) {
+               case FS_USAGE_REPLICAS:
+                       ret = bch2_replicas_set_usage(c, &u->r,
+                                               le64_to_cpu(u->sectors));
+                       break;
+               case FS_USAGE_INODES:
+                       percpu_u64_set(&c->usage[0]->s.nr_inodes,
+                                               le64_to_cpu(u->sectors));
+                       break;
+               case FS_USAGE_KEY_VERSION:
+                       atomic64_set(&c->key_version,
+                                    le64_to_cpu(u->sectors));
+                       break;
+               }
+
+               break;
+       }
+       }
+
+       return ret;
+}
+
 static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean *clean,
                                   struct jset *j)
@@ -126,6 +172,7 @@ int bch2_fs_recovery(struct bch_fs *c)
 {
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL, *sb_clean = NULL;
+       struct jset_entry *entry;
        LIST_HEAD(journal);
        struct jset *j = NULL;
        unsigned i;
@@ -178,28 +225,44 @@ int bch2_fs_recovery(struct bch_fs *c)
        fsck_err_on(clean && !journal_empty(&journal), c,
                    "filesystem marked clean but journal not empty");
 
+       err = "insufficient memory";
        if (clean) {
                c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);
+
+               for (entry = clean->start;
+                    entry != vstruct_end(&clean->field);
+                    entry = vstruct_next(entry)) {
+                       ret = journal_replay_entry_early(c, entry);
+                       if (ret)
+                               goto err;
+               }
        } else {
+               struct journal_replay *i;
+
                c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock);
+
+               list_for_each_entry(i, &journal, list)
+                       vstruct_for_each(&i->j, entry) {
+                               ret = journal_replay_entry_early(c, entry);
+                               if (ret)
+                                       goto err;
+                       }
        }
 
        for (i = 0; i < BTREE_ID_NR; i++) {
-               unsigned level;
-               struct bkey_i *k;
+               struct btree_root *r = &c->btree_roots[i];
 
-               k = btree_root_find(c, clean, j, i, &level);
-               if (!k)
+               if (!r->alive)
                        continue;
 
                err = "invalid btree root pointer";
-               if (IS_ERR(k))
+               if (r->error)
                        goto err;
 
                err = "error reading btree root";
-               if (bch2_btree_root_read(c, i, k, level)) {
+               if (bch2_btree_root_read(c, i, &r->key, r->level)) {
                        if (i != BTREE_ID_ALLOC)
                                goto err;
 
@@ -226,13 +289,20 @@ int bch2_fs_recovery(struct bch_fs *c)
 
        bch_verbose(c, "starting mark and sweep:");
        err = "error in recovery";
-       ret = bch2_initial_gc(c, &journal);
+       ret = bch2_gc(c, &journal, true);
        if (ret)
                goto err;
        bch_verbose(c, "mark and sweep done");
 
        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
 
+       /*
+        * Skip past versions that might have possibly been used (as nonces),
+        * but hadn't had their pointers written:
+        */
+       if (c->sb.encryption_type && !c->sb.clean)
+               atomic64_add(1 << 16, &c->key_version);
+
        if (c->opts.noreplay)
                goto out;
 
@@ -319,7 +389,7 @@ int bch2_fs_initialize(struct bch_fs *c)
        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);
 
-       ret = bch2_initial_gc(c, &journal);
+       ret = bch2_gc(c, &journal, true);
        if (ret)
                goto err;
 
index 8495cac29a1435f48fd6fb9e9ed2a58bbc285a28..52a422ac5ace513f5a93c99c6d0203cbff3f8a3e 100644 (file)
@@ -530,6 +530,34 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
        return 0;
 }
 
+int bch2_replicas_set_usage(struct bch_fs *c,
+                           struct bch_replicas_entry *r,
+                           u64 sectors)
+{
+       int ret, idx = bch2_replicas_entry_idx(c, r);
+
+       if (idx < 0) {
+               struct bch_replicas_cpu n;
+
+               n = cpu_replicas_add_entry(&c->replicas, r);
+               if (!n.entries)
+                       return -ENOMEM;
+
+               ret = replicas_table_update(c, &n);
+               if (ret)
+                       return ret;
+
+               kfree(n.entries);
+
+               idx = bch2_replicas_entry_idx(c, r);
+       BUG_ON(idx < 0);
+       }
+
+       percpu_u64_set(&c->usage[0]->data[idx], sectors);
+
+       return 0;
+}
+
 /* Replicas tracking - superblock: */
 
 static int
index 35164887dffb8ea5f92b605e7a9da419ee0569ae..d1457c786bb5894297d00a99d78771c5c4d0b40f 100644 (file)
@@ -57,6 +57,10 @@ unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
 int bch2_replicas_gc_end(struct bch_fs *, int);
 int bch2_replicas_gc_start(struct bch_fs *, unsigned);
 
+int bch2_replicas_set_usage(struct bch_fs *,
+                           struct bch_replicas_entry *,
+                           u64);
+
 #define for_each_cpu_replicas_entry(_r, _i)                            \
        for (_i = (_r)->entries;                                        \
             (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\