bcachefs: Kill metadata only gc
author: Kent Overstreet <kent.overstreet@gmail.com>
Fri, 22 Jan 2021 02:51:42 +0000 (21:51 -0500)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:52 +0000 (17:08 -0400)
This was useful before we had transactional updates to interior btree
nodes - but now, it's just extra unneeded complexity.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_gc.h
fs/bcachefs/recovery.c

index 5608d8a0ed618e872470ca836bd9c6107569be18..6611047dcb0db983546a2262dc2f4ed18e6e0bdf 100644 (file)
@@ -205,13 +205,12 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
 }
 
 static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
-                        bool initial, bool metadata_only)
+                        bool initial)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct btree *b;
-       unsigned depth = metadata_only                  ? 1
-               : bch2_expensive_debug_checks           ? 0
+       unsigned depth = bch2_expensive_debug_checks    ? 0
                : !btree_node_type_needs_gc(btree_id)   ? 1
                : 0;
        u8 max_stale = 0;
@@ -326,13 +325,11 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 
 static int bch2_gc_btree_init(struct bch_fs *c,
                              struct journal_keys *journal_keys,
-                             enum btree_id btree_id,
-                             bool metadata_only)
+                             enum btree_id btree_id)
 {
        struct btree *b;
-       unsigned target_depth = metadata_only           ? 1
-               : bch2_expensive_debug_checks           ? 0
-               : !btree_node_type_needs_gc(btree_id)   ? 1
+       unsigned target_depth = bch2_expensive_debug_checks     ? 0
+               : !btree_node_type_needs_gc(btree_id)           ? 1
                : 0;
        u8 max_stale = 0;
        int ret = 0;
@@ -377,7 +374,7 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
 }
 
 static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
-                         bool initial, bool metadata_only)
+                         bool initial)
 {
        enum btree_id ids[BTREE_ID_NR];
        unsigned i;
@@ -390,8 +387,8 @@ static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
                enum btree_id id = ids[i];
                int ret = initial
                        ? bch2_gc_btree_init(c, journal_keys,
-                                            id, metadata_only)
-                       : bch2_gc_btree(c, id, initial, metadata_only);
+                                            id)
+                       : bch2_gc_btree(c, id, initial);
                if (ret)
                        return ret;
        }
@@ -558,12 +555,11 @@ static void bch2_gc_free(struct bch_fs *c)
 }
 
 static int bch2_gc_done(struct bch_fs *c,
-                       bool initial, bool metadata_only)
+                       bool initial)
 {
        struct bch_dev *ca;
-       bool verify = !metadata_only &&
-               (!initial ||
-                (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
+       bool verify = (!initial ||
+                      (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
        unsigned i;
        int ret = 0;
 
@@ -601,7 +597,7 @@ static int bch2_gc_done(struct bch_fs *c,
 #define copy_fs_field(_f, _msg, ...)                                   \
        copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
 
-       if (!metadata_only) {
+       {
                struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
                struct stripe *dst, *src;
 
@@ -660,28 +656,20 @@ static int bch2_gc_done(struct bch_fs *c,
 
                copy_fs_field(hidden,           "hidden");
                copy_fs_field(btree,            "btree");
+               copy_fs_field(data,     "data");
+               copy_fs_field(cached,   "cached");
+               copy_fs_field(reserved, "reserved");
+               copy_fs_field(nr_inodes,"nr_inodes");
 
-               if (!metadata_only) {
-                       copy_fs_field(data,     "data");
-                       copy_fs_field(cached,   "cached");
-                       copy_fs_field(reserved, "reserved");
-                       copy_fs_field(nr_inodes,"nr_inodes");
-
-                       for (i = 0; i < BCH_REPLICAS_MAX; i++)
-                               copy_fs_field(persistent_reserved[i],
-                                             "persistent_reserved[%i]", i);
-               }
+               for (i = 0; i < BCH_REPLICAS_MAX; i++)
+                       copy_fs_field(persistent_reserved[i],
+                                     "persistent_reserved[%i]", i);
 
                for (i = 0; i < c->replicas.nr; i++) {
                        struct bch_replicas_entry *e =
                                cpu_replicas_entry(&c->replicas, i);
                        char buf[80];
 
-                       if (metadata_only &&
-                           (e->data_type == BCH_DATA_user ||
-                            e->data_type == BCH_DATA_cached))
-                               continue;
-
                        bch2_replicas_entry_to_text(&PBUF(buf), e);
 
                        copy_fs_field(replicas[i], "%s", buf);
@@ -697,8 +685,7 @@ fsck_err:
        return ret;
 }
 
-static int bch2_gc_start(struct bch_fs *c,
-                        bool metadata_only)
+static int bch2_gc_start(struct bch_fs *c)
 {
        struct bch_dev *ca;
        unsigned i;
@@ -762,13 +749,6 @@ static int bch2_gc_start(struct bch_fs *c,
 
                        d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
                        d->gen_valid = s->gen_valid;
-
-                       if (metadata_only &&
-                           (s->mark.data_type == BCH_DATA_user ||
-                            s->mark.data_type == BCH_DATA_cached)) {
-                               d->_mark = s->mark;
-                               d->_mark.owned_by_allocator = 0;
-                       }
                }
        };
 
@@ -796,7 +776,7 @@ static int bch2_gc_start(struct bch_fs *c,
  *    uses, GC could skip past them
  */
 int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
-           bool initial, bool metadata_only)
+           bool initial)
 {
        struct bch_dev *ca;
        u64 start_time = local_clock();
@@ -812,13 +792,13 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
        closure_wait_event(&c->btree_interior_update_wait,
                           !bch2_btree_interior_updates_nr_pending(c));
 again:
-       ret = bch2_gc_start(c, metadata_only);
+       ret = bch2_gc_start(c);
        if (ret)
                goto out;
 
        bch2_mark_superblocks(c);
 
-       ret = bch2_gc_btrees(c, journal_keys, initial, metadata_only);
+       ret = bch2_gc_btrees(c, journal_keys, initial);
        if (ret)
                goto out;
 
@@ -857,7 +837,7 @@ out:
                bch2_journal_block(&c->journal);
 
                percpu_down_write(&c->mark_lock);
-               ret = bch2_gc_done(c, initial, metadata_only);
+               ret = bch2_gc_done(c, initial);
 
                bch2_journal_unblock(&c->journal);
        } else {
index 3694a3df62a8c208271e64bdaa3b3439d697d89b..f0435a58793be24be2cbfa43e1c012c14084918a 100644 (file)
@@ -7,7 +7,7 @@
 void bch2_coalesce(struct bch_fs *);
 
 struct journal_keys;
-int bch2_gc(struct bch_fs *, struct journal_keys *, bool, bool);
+int bch2_gc(struct bch_fs *, struct journal_keys *, bool);
 int bch2_gc_gens(struct bch_fs *);
 void bch2_gc_thread_stop(struct bch_fs *);
 int bch2_gc_thread_start(struct bch_fs *);
index 8c67f146894522a3ef8871473ad866158b337a37..422f2fbe6dfb2d930054b7352f0d1e593c4a58bd 100644 (file)
@@ -1099,27 +1099,13 @@ use_clean:
 
        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
 
-       if ((c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) &&
-           !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA))) {
-               /*
-                * interior btree node updates aren't consistent with the
-                * journal; after an unclean shutdown we have to walk all
-                * pointers to metadata:
-                */
-               bch_info(c, "starting metadata mark and sweep");
-               err = "error in mark and sweep";
-               ret = bch2_gc(c, &c->journal_keys, true, true);
-               if (ret)
-                       goto err;
-               bch_verbose(c, "mark and sweep done");
-       }
-
        if (c->opts.fsck ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
+           !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
            test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
                bch_info(c, "starting mark and sweep");
                err = "error in mark and sweep";
-               ret = bch2_gc(c, &c->journal_keys, true, false);
+               ret = bch2_gc(c, &c->journal_keys, true);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");