bcachefs: Kill journal_keys->journal_seq_base
author Kent Overstreet <kent.overstreet@linux.dev>
Mon, 12 Sep 2022 06:22:47 +0000 (02:22 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:40 +0000 (17:09 -0400)
This removes an optimization that didn't actually save us any memory,
due to alignment, but did make the code more complicated than it needed
to be. We were also seeing a bug where journal_seq_base wasn't getting
correctly initialized; hopefully this fixes that too.
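
A quick way to see the alignment point: struct journal_key contains a
pointer, so the struct is 8-byte aligned, and padding swallows whatever
the relative u32 sequence number was supposed to save. A minimal
userspace sketch (hypothetical stand-in structs, not the kernel
definitions; plain u8 fields in place of the enum bitfields, void * in
place of struct bkey_i *):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct key_u32_seq {            /* old layout: u32 seq relative to a base */
            uint8_t         btree_id;
            uint8_t         level;
            bool            allocated;
            bool            overwritten;
            void            *k;     /* stand-in for struct bkey_i * */
            uint32_t        journal_seq;
            uint32_t        journal_offset;
    };

    struct key_u64_seq {            /* new layout: absolute u64 seq up front */
            uint64_t        journal_seq;
            uint32_t        journal_offset;
            uint8_t         btree_id;
            uint8_t         level;
            bool            allocated;
            bool            overwritten;
            void            *k;
    };

    int main(void)
    {
            /* On a typical LP64 target both print 24: the pointer forces
             * 8-byte alignment, so the u32 pair saved nothing. */
            printf("u32 seq: %zu\n", sizeof(struct key_u32_seq));
            printf("u64 seq: %zu\n", sizeof(struct key_u64_seq));
            return 0;
    }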

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/recovery.c

index c1d96222f4c3c6d30e98313536875aca9fb0abd0..74da688d994b641f7b3a1974615c4b8038bde8f3 100644
@@ -555,13 +555,13 @@ struct journal_seq_blacklist_table {
 
 struct journal_keys {
        struct journal_key {
+               u64             journal_seq;
+               u32             journal_offset;
                enum btree_id   btree_id:8;
                unsigned        level:8;
                bool            allocated;
                bool            overwritten;
                struct bkey_i   *k;
-               u32             journal_seq;
-               u32             journal_offset;
        }                       *d;
        /*
         * Gap buffer: instead of all the empty space in the array being at the
@@ -571,7 +571,6 @@ struct journal_keys {
        size_t                  gap;
        size_t                  nr;
        size_t                  size;
-       u64                     journal_seq_base;
 };
 
 struct btree_path_buf {
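
The gap-buffer comment in the hunk above is worth unpacking: the free
space in the keys array sits at keys->d[gap] rather than at the end, so
an insert only has to memmove the entries between the old and new gap
positions. A hedged, generic sketch of the idea (not the bcachefs
implementation; the names and the int payload are illustrative):

    #include <string.h>
    #include <stddef.h>

    struct gap_buf {
            int     *d;     /* backing array */
            size_t  gap;    /* start of the free region */
            size_t  nr;     /* live entries */
            size_t  size;   /* total capacity */
    };

    /* Live entries occupy [0, gap) and [gap + free, size), free = size - nr. */
    static void gap_move(struct gap_buf *b, size_t pos)
    {
            size_t free = b->size - b->nr;

            if (pos < b->gap)       /* slide gap left */
                    memmove(b->d + pos + free, b->d + pos,
                            (b->gap - pos) * sizeof(b->d[0]));
            else if (pos > b->gap)  /* slide gap right */
                    memmove(b->d + b->gap, b->d + b->gap + free,
                            (pos - b->gap) * sizeof(b->d[0]));
            b->gap = pos;
    }

    /* Insert v at index pos; caller must ensure nr < size. */
    static void gap_insert(struct gap_buf *b, size_t pos, int v)
    {
            gap_move(b, pos);
            b->d[b->gap++] = v;
            b->nr++;
    }

Successive inserts into a sorted key array tend to cluster, which is
exactly the access pattern a gap buffer makes cheap.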
index 2cf347530b655417ca1e8a1c0842757800056ad7..ea8cc636a9e08b6ae007e93e125003226035596a 100644
@@ -222,7 +222,6 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
                struct journal_keys new_keys = {
                        .nr                     = keys->nr,
                        .size                   = max_t(size_t, keys->size, 8) * 2,
-                       .journal_seq_base       = keys->journal_seq_base,
                };
 
                new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
@@ -493,9 +492,6 @@ static int journal_keys_sort(struct bch_fs *c)
                if (!i || i->ignore)
                        continue;
 
-               if (!keys->journal_seq_base)
-                       keys->journal_seq_base = le64_to_cpu(i->j.seq);
-
                for_each_jset_key(k, _n, entry, &i->j)
                        nr_keys++;
        }
@@ -515,15 +511,12 @@ static int journal_keys_sort(struct bch_fs *c)
                if (!i || i->ignore)
                        continue;
 
-               BUG_ON(le64_to_cpu(i->j.seq) - keys->journal_seq_base > U32_MAX);
-
                for_each_jset_key(k, _n, entry, &i->j)
                        keys->d[keys->nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .level          = entry->level,
                                .k              = k,
-                               .journal_seq    = le64_to_cpu(i->j.seq) -
-                                       keys->journal_seq_base,
+                               .journal_seq    = le64_to_cpu(i->j.seq),
                                .journal_offset = k->_data - i->j._data,
                        };
        }
@@ -617,15 +610,12 @@ static int bch2_journal_replay(struct bch_fs *c)
             sizeof(keys_sorted[0]),
             journal_sort_seq_cmp, NULL);
 
-       if (keys->nr)
-               replay_now_at(j, keys->journal_seq_base);
-
        for (i = 0; i < keys->nr; i++) {
                k = keys_sorted[i];
 
                cond_resched();
 
-               replay_now_at(j, keys->journal_seq_base + k->journal_seq);
+               replay_now_at(j, k->journal_seq);
 
                ret = bch2_trans_do(c, NULL, NULL,
                                    BTREE_INSERT_LAZY_RW|