bcachefs: Break out bch2_btree_path_traverse_cached_slowpath()
author Kent Overstreet <kent.overstreet@linux.dev>
Tue, 27 Sep 2022 02:34:49 +0000 (22:34 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:42 +0000 (17:09 -0400)
Prep work for further refactoring: split out the old implementation as a
noinline slowpath, bch2_btree_path_traverse_cached_slowpath(), and add a new
bch2_btree_path_traverse_cached() that handles the common case (a key cache
hit on a valid entry) and falls back to the slowpath on a miss or an invalid
entry.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_key_cache.c

index 1a88d1d79699f1efbbe3faa38025001b284d6fd1..b26d4ffe2a11cbe32ac0a0006c2c50a8f6c91b32 100644
@@ -398,9 +398,9 @@ err:
        return ret;
 }
 
-__flatten
-int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
-                                   unsigned flags)
+noinline static int
+bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
+                                        unsigned flags)
 {
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
@@ -481,6 +481,60 @@ err:
        return ret;
 }
 
+int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
+                                   unsigned flags)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_cached *ck;
+       int ret = 0;
+
+       EBUG_ON(path->level);
+
+       path->l[1].b = NULL;
+
+       if (bch2_btree_node_relock(trans, path, 0)) {
+               ck = (void *) path->l[0].b;
+               goto fill;
+       }
+retry:
+       ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
+       if (!ck) {
+               return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
+       } else {
+               enum six_lock_type lock_want = __btree_lock_want(path, 0);
+
+               ret = btree_node_lock(trans, path, (void *) ck, 0,
+                                     lock_want, _THIS_IP_);
+               EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));
+
+               if (ret)
+                       return ret;
+
+               if (ck->key.btree_id != path->btree_id ||
+                   bpos_cmp(ck->key.pos, path->pos)) {
+                       six_unlock_type(&ck->c.lock, lock_want);
+                       goto retry;
+               }
+
+               mark_btree_node_locked(trans, path, 0, lock_want);
+       }
+
+       path->l[0].lock_seq     = ck->c.lock.state.seq;
+       path->l[0].b            = (void *) ck;
+fill:
+       if (!ck->valid)
+               return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
+
+       if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+               set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
+
+       path->uptodate = BTREE_ITER_UPTODATE;
+       EBUG_ON(!ck->valid);
+       EBUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
+
+       return ret;
+}
+
 static int btree_key_cache_flush_pos(struct btree_trans *trans,
                                     struct bkey_cached_key key,
                                     u64 journal_seq,
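
For illustration, a minimal standalone sketch of the pattern this commit
applies: an out-of-line (noinline) slowpath plus a thin fast-path wrapper
that falls back to it on a cache miss or an invalid entry. Every identifier
below (cache_entry, cache_lookup, traverse_slowpath, traverse_cached) is a
hypothetical stand-in, not bcachefs API; only the control-flow shape mirrors
the patch.

/* fastslow.c - sketch of the fast path / noinline slowpath split.
 * Build: cc -O2 -o fastslow fastslow.c
 */
#include <stdbool.h>
#include <stdio.h>

struct cache_entry {
	int	id;
	bool	valid;
};

/* Hypothetical four-entry "key cache". */
static struct cache_entry cache[4] = {
	{ 0, true }, { 1, false }, { 2, true }, { 3, true },
};

/* Hypothetical lockless lookup: returns NULL on a miss. */
static struct cache_entry *cache_lookup(int id)
{
	return id >= 0 && id < 4 ? &cache[id] : NULL;
}

/*
 * Slow path: kept out of line so its code and stack footprint don't
 * burden the common case -- the same reason the patch marks
 * bch2_btree_path_traverse_cached_slowpath() noinline.
 */
__attribute__((noinline))
static int traverse_slowpath(int id)
{
	printf("slowpath: (re)filling entry %d\n", id);
	if (id < 0 || id >= 4)
		return -1;
	cache[id].valid = true;
	return 0;
}

/*
 * Fast path: handle a valid cache hit directly, and fall back to the
 * slowpath on a miss or an invalid entry -- mirroring the two
 * "return bch2_btree_path_traverse_cached_slowpath(...)" calls above.
 */
static int traverse_cached(int id)
{
	struct cache_entry *ck = cache_lookup(id);

	if (!ck || !ck->valid)
		return traverse_slowpath(id);

	printf("fastpath: entry %d already valid\n", id);
	return 0;
}

int main(void)
{
	traverse_cached(0);	/* hit, valid   -> fast path */
	traverse_cached(1);	/* hit, !valid  -> slow path */
	traverse_cached(9);	/* miss         -> slow path */
	return 0;
}

Two details of the real fast path that the sketch leaves out: the __flatten
annotation is dropped along with the split, and because
bch2_btree_key_cache_find() runs without the node lock held, the entry it
returns can be freed and reused before btree_node_lock() succeeds -- which
is why the fast path re-checks ck->key.btree_id/ck->key.pos against the
path and, on a mismatch, unlocks and retries.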