bcachefs: Fix a use after free
author: Kent Overstreet <kent.overstreet@linux.dev>
Mon, 14 Nov 2022 07:22:30 +0000 (02:22 -0500)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:46 +0000 (17:09 -0400)
This fixes a regression from percpu freelists in the btree key cache
code: in a rare error path, we were immediately freeing a bkey_cached
that had been used before and should've waited for an SRCU barrier.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_key_cache.c

index 179669dbd688717f99e57b907cdc7a702608c8f2..1ac91221cc95b6a2383e5590c8644839959c972b 100644 (file)
@@ -190,7 +190,8 @@ static void bkey_cached_free_fast(struct btree_key_cache *bc,
 }
 
 static struct bkey_cached *
-bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
+bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
+                 bool *was_new)
 {
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
@@ -275,6 +276,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
                ck->c.cached = true;
                BUG_ON(!six_trylock_intent(&ck->c.lock));
                BUG_ON(!six_trylock_write(&ck->c.lock));
+               *was_new = true;
                return ck;
        }
 
@@ -313,9 +315,9 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck;
-       bool was_new = true;
+       bool was_new = false;
 
-       ck = bkey_cached_alloc(trans, path);
+       ck = bkey_cached_alloc(trans, path, &was_new);
        if (IS_ERR(ck))
                return ck;
 
@@ -328,7 +330,6 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                }
 
                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
-               was_new = false;
        } else {
                if (path->btree_id == BTREE_ID_subvolumes)
                        six_lock_pcpu_alloc(&ck->c.lock);