bcachefs: Kill bch2_invalidate_bucket()
author	Kent Overstreet <kent.overstreet@gmail.com>
Fri, 22 Jan 2021 23:19:15 +0000 (18:19 -0500)
committer	Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:52 +0000 (17:08 -0400)
This patch works towards eventually getting rid of the in-memory
struct bucket and relying only on the btree representation.

Since bch2_invalidate_bucket() was only used for incrementing gens, not
for invalidating cached data, no other counters were being changed as a
side effect; that means it's safe for the allocator code to increment
the bucket gen directly.
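
Concretely, the gen bump is a lockless compare-and-exchange loop on the
bucket mark. Below is a simplified sketch of the pattern behind
bucket_cmpxchg(g, m, m.gen++); the real macro lives in
fs/bcachefs/buckets.h, and both the helper name and the field layout
here are made up for illustration:

	/*
	 * Illustrative sketch only (bucket_gen_bump is not a real
	 * helper): struct bucket_mark is assumed to be a union of
	 * packed fields and an atomic64_t, so the whole mark can be
	 * updated with a single 64-bit cmpxchg:
	 */
	static inline struct bucket_mark bucket_gen_bump(struct bucket *g)
	{
		struct bucket_mark old, new;
		u64 v = atomic64_read(&g->_mark.v);

		do {
			old.v.counter = new.v.counter = v;
			new.gen++;	/* the only field that changes */
		} while ((v = atomic64_cmpxchg(&g->_mark.v, old.v.counter,
					       new.v.counter)) != old.v.counter);

		return old;	/* previous mark, for the caller */
	}

Because nothing else in the mark changes, no device or filesystem usage
counters need updating, which is what makes the dedicated
bch2_invalidate_bucket() helper unnecessary.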

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_background.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h

fs/bcachefs/alloc_background.c
index e9200f8838941a6e2ff6674ee2193f560bc8d4c5..b306eed02a6d9fdec794a69d23a88415ffc38624 100644
@@ -896,34 +896,32 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
 
        /* first, put on free_inc and mark as owned by allocator: */
        percpu_down_read(&c->mark_lock);
-       spin_lock(&c->freelist_lock);
-
-       verify_not_on_freelist(c, ca, b);
-
-       BUG_ON(!fifo_push(&ca->free_inc, b));
-
        g = bucket(ca, b);
        m = READ_ONCE(g->mark);
 
-       invalidating_cached_data = m.cached_sectors != 0;
+       BUG_ON(m.data_type || m.dirty_sectors);
+
+       bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
+
+       spin_lock(&c->freelist_lock);
+       verify_not_on_freelist(c, ca, b);
+       BUG_ON(!fifo_push(&ca->free_inc, b));
+       spin_unlock(&c->freelist_lock);
 
        /*
         * If we're not invalidating cached data, we only increment the bucket
         * gen in memory here, the incremented gen will be updated in the btree
         * by bch2_trans_mark_pointer():
         */
+       if (!m.cached_sectors &&
+           !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
+               bucket_cmpxchg(g, m, m.gen++);
+               percpu_up_read(&c->mark_lock);
+               goto out;
+       }
 
-       if (!invalidating_cached_data)
-               bch2_invalidate_bucket(c, ca, b, &m);
-       else
-               bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
-
-       spin_unlock(&c->freelist_lock);
        percpu_up_read(&c->mark_lock);
 
-       if (!invalidating_cached_data)
-               goto out;
-
        /*
         * If the read-only path is trying to shut down, we can't be generating
         * new btree updates:
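
The in-memory-only path above is gated on two conditions: the bucket has
no cached data, and its last update has already made it to the journal
on disk, i.e. !bucket_needs_journal_commit(). A sketch of what that
helper checks, assuming the journal_seq fields that struct bucket_mark
carried at the time:

	/*
	 * Assumed shape, for illustration: true if the bucket's last
	 * modification has a journal sequence number newer than the
	 * newest sequence known to be on disk:
	 */
	static inline bool bucket_needs_journal_commit(struct bucket_mark m,
						       u16 last_seq_ondisk)
	{
		return m.journal_seq_valid &&
			((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
	}

If either condition fails, the function falls through to the btree
update path instead of touching only the in-memory gen.
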
fs/bcachefs/buckets.c
index 327d34b30de0bb8fe0e2333f60e8922950324ef8..c3d63a190154f31eba68460c238d796812b47851 100644
@@ -644,46 +644,6 @@ unwind:
        ret;                                                            \
 })
 
-static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-                                   size_t b, struct bucket_mark *ret,
-                                   bool gc)
-{
-       struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
-       struct bucket *g = __bucket(ca, b, gc);
-       struct bucket_mark old, new;
-
-       old = bucket_cmpxchg(g, new, ({
-               BUG_ON(!is_available_bucket(new));
-
-               new.owned_by_allocator  = true;
-               new.data_type           = 0;
-               new.cached_sectors      = 0;
-               new.dirty_sectors       = 0;
-               new.gen++;
-       }));
-
-       bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
-
-       if (old.cached_sectors)
-               update_cached_sectors(c, fs_usage, ca->dev_idx,
-                                     -((s64) old.cached_sectors));
-
-       if (!gc)
-               *ret = old;
-       return 0;
-}
-
-void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-                           size_t b, struct bucket_mark *old)
-{
-       do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
-                  ca, b, old);
-
-       if (!old->owned_by_allocator && old->cached_sectors)
-               trace_invalidate(ca, bucket_to_sector(ca, b),
-                                old->cached_sectors);
-}
-
 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                                    size_t b, bool owned_by_allocator,
                                    bool gc)
fs/bcachefs/buckets.h
index 14f53c92bb7b05db2c6bd2f96e232e35c13116fc..7eebae7c439df0682432793db98088fb08e79b8a 100644
@@ -236,8 +236,6 @@ bch2_fs_usage_read_short(struct bch_fs *);
 void bch2_bucket_seq_cleanup(struct bch_fs *);
 void bch2_fs_usage_initialize(struct bch_fs *);
 
-void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
-                           size_t, struct bucket_mark *);
 void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, bool, struct gc_pos, unsigned);
 void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,