bcachefs: Key cache now works for snapshot btrees
author	Kent Overstreet <kent.overstreet@linux.dev>
Wed, 23 Nov 2022 01:15:33 +0000 (20:15 -0500)
committer	Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:47 +0000 (17:09 -0400)
This switches btree_key_cache_fill() to use a btree iterator, not a
btree path, so that it can search for keys in previous snapshots.
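
For context, this is the standard bcachefs iterator lookup pattern the fill
path moves to -- a minimal sketch using only the helpers visible in the hunks
below, with error handling elided:

	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, btree_id, pos, 0);
	k = bch2_btree_iter_peek_slot(&iter);
	/*
	 * On a btree with snapshots, the iterator can return a key from an
	 * ancestor snapshot; a raw btree_path lookup at an exact pos cannot.
	 */
	bch2_trans_iter_exit(trans, &iter);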

We also add another iterator flag, BTREE_ITER_KEY_CACHE_FILL, to avoid
recursion back into the key cache.
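
The guard that breaks the recursion is the first hunk below; annotated, the
idea is:

	/*
	 * btree_key_cache_fill() now does its lookup via an iterator;
	 * without a guard, that lookup would consult the key cache and could
	 * trigger another fill.  An iterator flagged
	 * BTREE_ITER_KEY_CACHE_FILL skips the key cache at its own position
	 * and reads the underlying btree instead:
	 */
	if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
	    bpos_eq(iter->pos, pos))
		return bkey_s_c_null;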

This will allow us to re-enable the key cache for inodes in the next
patch.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_types.h
fs/bcachefs/inode.c

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index b1580f6efb0f82fe14226d86a2b204e87b78539f..ecb6f27e49177b1ba51b37b2dab5a29e3ab1855e 100644
@@ -1855,6 +1855,10 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
        struct bkey_s_c k;
        int ret;
 
+       if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
+           bpos_eq(iter->pos, pos))
+               return bkey_s_c_null;
+
        if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
                return bkey_s_c_null;
 
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index fc924fd2427476762734a76bcee9fccb9580ee69..c118d1b8241fd94c6210b10ee375d1e994f41059 100644
@@ -370,20 +370,20 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                                struct btree_path *ck_path,
                                struct bkey_cached *ck)
 {
-       struct btree_path *path;
+       struct btree_iter iter;
        struct bkey_s_c k;
        unsigned new_u64s = 0;
        struct bkey_i *new_k = NULL;
-       struct bkey u;
        int ret;
 
-       path = bch2_path_get(trans, ck->key.btree_id, ck->key.pos, 0, 0, 0);
-       ret = bch2_btree_path_traverse(trans, path, 0);
+       bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
+                            BTREE_ITER_KEY_CACHE_FILL|
+                            BTREE_ITER_CACHED_NOFILL);
+       k = bch2_btree_iter_peek_slot(&iter);
+       ret = bkey_err(k);
        if (ret)
                goto err;
 
-       k = bch2_btree_path_peek_slot(path, &u);
-
        if (!bch2_btree_node_relock(trans, ck_path, 0)) {
                trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
                ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
@@ -431,9 +431,9 @@ static int btree_key_cache_fill(struct btree_trans *trans,
        bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
 
        /* We're not likely to need this iterator again: */
-       path->preserve = false;
+       set_btree_iter_dontneed(&iter);
 err:
-       bch2_path_put(trans, path, 0);
+       bch2_trans_iter_exit(trans, &iter);
        return ret;
 }
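
Put together, the lookup at the top of btree_key_cache_fill() now follows the
usual iterator pattern -- a trimmed sketch of the hunks above, with the
unchanged relock and key-allocation logic in between omitted:

	bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
			     BTREE_ITER_KEY_CACHE_FILL|
			     BTREE_ITER_CACHED_NOFILL);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;
	...
	/* We're not likely to need this iterator again: */
	set_btree_iter_dontneed(&iter);		/* was: path->preserve = false */
err:
	bch2_trans_iter_exit(trans, &iter);	/* was: bch2_path_put() */
	return ret;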
 
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index cdb887abcfe16b2a8f605dd762606d192a3f79cb..3f6ca40b52f615430f739102b932c0e56c6d8cdb 100644
@@ -208,6 +208,7 @@ struct btree_node_iter {
 #define BTREE_ITER_FILTER_SNAPSHOTS    (1 << 12)
 #define BTREE_ITER_NOPRESERVE          (1 << 13)
 #define BTREE_ITER_CACHED_NOFILL       (1 << 14)
+#define BTREE_ITER_KEY_CACHE_FILL      (1 << 15)
 
 enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index b4f09d77148d62583c0cf72715f70a90016c5f00..4ca70c6c3a4f70682fa7dcdbcdb8cff472de0a6f 100644
@@ -684,8 +684,8 @@ retry:
 
        if (!bkey_is_inode(k.k)) {
                bch2_fs_inconsistent(trans.c,
-                                    "inode %llu not found when deleting",
-                                    inum.inum);
+                                    "inode %llu:%u not found when deleting",
+                                    inum.inum, snapshot);
                ret = -EIO;
                goto err;
        }