From 29aa78f15e1bbd984cc14f395544d62b6f0a2a33 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 17 Oct 2022 02:04:31 -0400
Subject: [PATCH] bcachefs: Split out __btree_path_up_until_good_node()

This breaks up btree_path_up_until_good_node() so that only the fastpath
gets inlined.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_iter.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index f928de6692aec..0bd67600f0db0 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -994,14 +994,9 @@ err:
 	return ret;
 }
 
-static inline bool btree_path_good_node(struct btree_trans *trans,
-					struct btree_path *path,
-					unsigned l, int check_pos)
+static inline bool btree_path_check_pos_in_node(struct btree_path *path,
+						unsigned l, int check_pos)
 {
-	if (!is_btree_node(path, l) ||
-	    !bch2_btree_node_relock(trans, path, l))
-		return false;
-
 	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
 		return false;
 	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
@@ -1009,6 +1004,15 @@ static inline bool btree_path_good_node(struct btree_trans *trans,
 	return true;
 }
 
+static inline bool btree_path_good_node(struct btree_trans *trans,
+					struct btree_path *path,
+					unsigned l, int check_pos)
+{
+	return is_btree_node(path, l) &&
+		bch2_btree_node_relock(trans, path, l) &&
+		btree_path_check_pos_in_node(path, l, check_pos);
+}
+
 static void btree_path_set_level_down(struct btree_trans *trans,
 				      struct btree_path *path,
 				      unsigned new_level)
@@ -1025,9 +1029,9 @@ static void btree_path_set_level_down(struct btree_trans *trans,
 	bch2_btree_path_verify(trans, path);
 }
 
-static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
-						     struct btree_path *path,
-						     int check_pos)
+static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
+							  struct btree_path *path,
+							  int check_pos)
 {
 	unsigned i, l = path->level;
 again:
@@ -1048,6 +1052,16 @@ again:
 	return l;
 }
 
+static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
+						     struct btree_path *path,
+						     int check_pos)
+{
+	return likely(btree_node_locked(path, path->level) &&
+		      btree_path_check_pos_in_node(path, path->level, check_pos))
+		? path->level
+		: __btree_path_up_until_good_node(trans, path, check_pos);
+}
+
 /*
  * This is the main state machine for walking down the btree - walks down to a
  * specified depth
-- 
2.30.2
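
Not part of the patch itself: a minimal standalone sketch of the same pattern
the commit applies, shown here with illustrative names only and as plain
userspace C so it compiles on its own. An always-inlined fast path handles the
expected case with a single likely() branch and falls through to a noinline
slow path, so the cold code is not duplicated at every call site.

#include <stdbool.h>
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define noinline	__attribute__((noinline))

struct counter {
	unsigned	cached;
	bool		valid;
};

/* Slow path: recompute and cache the value; kept out of line so the
 * inlined fast path stays small at every call site. */
static noinline unsigned counter_get_slowpath(struct counter *c)
{
	c->cached = 42;		/* stand-in for an expensive recomputation */
	c->valid = true;
	return c->cached;
}

/* Fast path: one predicted-taken branch, cheap enough to inline. */
static inline unsigned counter_get(struct counter *c)
{
	return likely(c->valid)
		? c->cached
		: counter_get_slowpath(c);
}

int main(void)
{
	struct counter c = { 0 };

	printf("%u %u\n", counter_get(&c), counter_get(&c));
	return 0;
}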