bcachefs: Move snapshot table size to struct snapshot_table
author      Kent Overstreet <kent.overstreet@linux.dev>
            Fri, 22 Mar 2024 00:16:23 +0000 (20:16 -0400)
committer   Kent Overstreet <kent.overstreet@linux.dev>
            Mon, 1 Apr 2024 00:36:11 +0000 (20:36 -0400)
We need to add bounds checking for snapshot table accesses - it turns
out there are cases where we do need to use the snapshots table before
fsck checks have completed (and indeed, fsck may not have been run).

Bounds checking a reader requires a size that is coherent with the
RCU-published table, so move the size out of struct bch_fs and into
the table allocation itself, where it is visible to anyone holding
the RCU pointer.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
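
[Editor's note: with ->nr and the entries in one RCU-published
allocation, a reader can bounds check without any other
synchronization. A minimal reader sketch, not part of the patch;
the function name is hypothetical:

    static bool snapshot_id_in_table(struct bch_fs *c, u32 id)
    {
            bool ret;

            rcu_read_lock();
            ret = __snapshot_t(rcu_dereference(c->snapshots), id) != NULL;
            rcu_read_unlock();
            return ret;
    }
]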
fs/bcachefs/bcachefs.h
fs/bcachefs/snapshot.c
fs/bcachefs/snapshot.h
fs/bcachefs/subvolume_types.h

index 799aa32b6b4d990f913b0d5dfb98b6a47af1f0b2..a9ade0b1f78cf2e39ea52ec1ebe2db1f39ded7e1 100644 (file)
@@ -810,7 +810,6 @@ struct bch_fs {
 
        /* snapshot.c: */
        struct snapshot_table __rcu *snapshots;
-       size_t                  snapshot_table_size;
        struct mutex            snapshot_table_lock;
        struct rw_semaphore     snapshot_create_lock;
 
index 39debe814bf392acb76c7cebe6752736d6c57cff..9cd71e613dc9b10db8fe26505e58ff4dead58982 100644 (file)
@@ -151,36 +151,39 @@ out:
 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
 {
        size_t idx = U32_MAX - id;
-       size_t new_size;
        struct snapshot_table *new, *old;
 
-       new_size = max(16UL, roundup_pow_of_two(idx + 1));
+       size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
+       size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
 
-       new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
+       new = kvzalloc(new_bytes, GFP_KERNEL);
        if (!new)
                return NULL;
 
+       new->nr = new_size;
+
        old = rcu_dereference_protected(c->snapshots, true);
        if (old)
-               memcpy(new->s,
-                      rcu_dereference_protected(c->snapshots, true)->s,
-                      sizeof(new->s[0]) * c->snapshot_table_size);
+               memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);
 
        rcu_assign_pointer(c->snapshots, new);
-       c->snapshot_table_size = new_size;
-       kvfree_rcu_mightsleep(old);
+       kvfree_rcu(old, rcu);
 
-       return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+       return &rcu_dereference_protected(c->snapshots,
+                               lockdep_is_held(&c->snapshot_table_lock))->s[idx];
 }
 
 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
 {
        size_t idx = U32_MAX - id;
+       struct snapshot_table *table =
+               rcu_dereference_protected(c->snapshots,
+                               lockdep_is_held(&c->snapshot_table_lock));
 
        lockdep_assert_held(&c->snapshot_table_lock);
 
-       if (likely(idx < c->snapshot_table_size))
-               return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+       if (likely(table && idx < table->nr))
+               return &table->s[idx];
 
        return __snapshot_t_mut(c, id);
 }
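
[Editor's note: two things to note in the hunk above.
kmalloc_size_roundup() converts the request into the size the
allocator will actually return, and the derived new_size lets the
table use all of that slack, so repeated growth causes fewer
reallocations. The swap itself is the standard RCU replace-and-reclaim
idiom - embedding an rcu_head (see the subvolume_types.h hunk below)
is what lets the patch switch from kvfree_rcu_mightsleep() to the
non-sleeping two-argument kvfree_rcu(). A generic, self-contained
sketch of the idiom; names are hypothetical, this is not bcachefs code:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct tbl {
            struct rcu_head rcu;
            size_t          nr;
            int             entries[];
    };

    /* Updater side; caller holds whatever lock serializes resizes. */
    static void publish_resized(struct tbl __rcu **slot, struct tbl *new)
    {
            struct tbl *old = rcu_dereference_protected(*slot, true);

            if (old)
                    memcpy(new->entries, old->entries,
                           sizeof(old->entries[0]) * old->nr);

            rcu_assign_pointer(*slot, new); /* publish; pairs with readers' rcu_dereference() */
            kvfree_rcu(old, rcu);           /* NULL-safe; frees after a grace period */
    }
]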
index 7c66ffc06385ddea63685298f691660d906055d5..8538b7fddfed3830dd8e4b70d4cb1bc78a11e2c1 100644 (file)
@@ -33,7 +33,11 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
 
 static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
 {
-       return &t->s[U32_MAX - id];
+       u32 idx = U32_MAX - id;
+
+       return likely(t && idx < t->nr)
+               ? &t->s[idx]
+               : NULL;
 }
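
[Editor's note: __snapshot_t() can now return NULL for a missing table
or an out-of-range id, so every caller needs a check. A hypothetical
caller sketch; the 'parent' member is assumed from struct snapshot_t,
which is not shown in full here:

    static u32 snapshot_parent_or_zero(struct snapshot_table *t, u32 id)
    {
            const struct snapshot_t *s = __snapshot_t(t, id);

            return s ? s->parent : 0;
    }
]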
 
 static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
index ae644adfc391680d85b6fe53c25f08ae9337e037..9b10c8947828e0d40db0a63f7d3db22457769d46 100644 (file)
@@ -20,6 +20,8 @@ struct snapshot_t {
 };
 
 struct snapshot_table {
+       struct rcu_head         rcu;
+       size_t                  nr;
 #ifndef RUST_BINDGEN
        DECLARE_FLEX_ARRAY(struct snapshot_t, s);
 #else
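
[Editor's note: the two new members complete the picture - rcu is what
the two-argument kvfree_rcu() in snapshot.c hangs the grace period on,
and nr is what every bounds check above reads. A minimal allocation
sketch showing how struct_size() matches this layout; illustrative
only, and it omits the kmalloc_size_roundup() step the patch actually
uses:

    static struct snapshot_table *alloc_table_sketch(size_t n)
    {
            /* header (rcu + nr) plus n flex-array entries */
            struct snapshot_table *t = kvzalloc(struct_size(t, s, n), GFP_KERNEL);

            if (t)
                    t->nr = n;
            return t;
    }
]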