bcachefs: Optimize btree lookups in write path
author    Kent Overstreet <kent.overstreet@gmail.com>
Tue, 7 Sep 2021 19:34:16 +0000 (15:34 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:12 +0000 (17:09 -0400)
This patch significantly reduces the number of btree lookups required in
the extent update path: bch2_extent_update() now pre-traverses the
iterator at iter->pos, so a btree path already exists where extent
merging will look, and the extent overwrite path gets a fresh iterator
instead of rewinding the existing one, letting the new path be cloned
from the one already pointing at the start of the insert.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/io.c

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 3aa2777e40a5bd2cb3ac4ced661e9c49b1f4b43d..d2ee6e9aa370124f83cd719167421dd72c81ca84 100644
@@ -1805,6 +1805,12 @@ hole:
 
 /* Btree iterators: */
 
+int __must_check
+__bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+       return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+}
+
 int __must_check
 bch2_btree_iter_traverse(struct btree_iter *iter)
 {
@@ -2416,7 +2422,7 @@ static void __bch2_trans_iter_init(struct btree_trans *trans,
        iter->path = bch2_path_get(trans,
                                   flags & BTREE_ITER_CACHED,
                                   btree_id,
-                                  btree_iter_search_key(iter),
+                                  iter->pos,
                                   locks_want,
                                   depth,
                                   flags & BTREE_ITER_INTENT);
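For context: the new __bch2_btree_iter_traverse() traverses the path at the
iterator's current position, whereas bch2_btree_iter_traverse() first moves
the path to the iterator's search key (pos + 1 for extents), costing an
extra lookup. A minimal sketch of a caller, using only functions visible in
this patch; the helper name and the zero flags argument are illustrative:

	static int update_at_pos(struct btree_trans *trans, struct btree_iter *iter,
				 struct bkey_i *k)
	{
		/* traverse at iter->pos, skipping the search-key adjustment: */
		int ret = __bch2_btree_iter_traverse(iter);
		if (ret)
			return ret;

		return bch2_trans_update(trans, iter, k, 0);
	}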
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 2459291231eac71f7a9c8cfe910b2080298e3ba4..58add0bb1c815faaf2e08558a0b901f44d74cba9 100644
@@ -221,6 +221,7 @@ void bch2_trans_downgrade(struct btree_trans *);
 void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
 void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
 
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
 int __must_check bch2_btree_iter_traverse(struct btree_iter *);
 
 struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 310442fcc37f766a32937e92b227e3b288dd7dab..eb4217a3b7197104313e0dab7c1367c448d368fd 100644
@@ -985,7 +985,14 @@ next:
                bch2_bkey_merge(c, bkey_i_to_s(insert), k);
 out:
        if (!bkey_deleted(&insert->k)) {
-               bch2_btree_iter_set_pos(&iter, insert->k.p);
+               /*
+                * Rewinding iterators is expensive: get a new one instead; its
+                * path will be cloned from the one pointing at the start of insert:
+                */
+               bch2_trans_iter_exit(trans, &iter);
+               bch2_trans_iter_init(trans, &iter, btree_id, insert->k.p,
+                                    BTREE_ITER_NOT_EXTENTS|
+                                    BTREE_ITER_INTENT);
                ret   = bch2_btree_iter_traverse(&iter) ?:
                        bch2_trans_update(trans, &iter, insert, flags);
        }
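The hunk above avoids using bch2_btree_iter_set_pos() to rewind the iterator
backwards. Condensed into a standalone helper, the pattern looks roughly like
this; reposition_iter is a hypothetical name, and every call appears in this
patch:

	static int reposition_iter(struct btree_trans *trans, struct btree_iter *iter,
				   enum btree_id btree_id, struct bpos pos)
	{
		/*
		 * Dropping and re-taking the iterator lets the new path be
		 * cloned from a path the transaction already holds at 'pos':
		 */
		bch2_trans_iter_exit(trans, iter);
		bch2_trans_iter_init(trans, iter, btree_id, pos,
				     BTREE_ITER_NOT_EXTENTS|
				     BTREE_ITER_INTENT);
		return bch2_btree_iter_traverse(iter);
	}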
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index bee33258c0d80e8ccda7c3c086ce5d49d7f3ac7a..f95ceb820faabf19ab7dcc688424d7cc62fadfca 100644
@@ -281,6 +281,16 @@ int bch2_extent_update(struct btree_trans *trans,
        s64 i_sectors_delta = 0, disk_sectors_delta = 0;
        int ret;
 
+       /*
+        * This traverses the iterator without changing iter->path->pos to
+        * search_key() (which is pos + 1 for extents): we want there to be a
+        * path already traversed at iter->pos, because
+        * bch2_trans_extent_update() will use it to attempt extent merging
+        */
+       ret = __bch2_btree_iter_traverse(iter);
+       if (ret)
+               return ret;
+
        ret = bch2_extent_trim_atomic(trans, iter, k);
        if (ret)
                return ret;
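Putting both halves together, a hedged sketch of the resulting order of
operations in bch2_extent_update(); the glue code and the zero flags
argument are invented for illustration, and only calls shown in this commit
are used:

	static int extent_update_order(struct btree_trans *trans,
				       struct btree_iter *iter, struct bkey_i *k)
	{
		int ret;

		/* cheap traverse: the path lands at iter->pos, not pos + 1 */
		ret = __bch2_btree_iter_traverse(iter);
		if (ret)
			return ret;

		ret = bch2_extent_trim_atomic(trans, iter, k);
		if (ret)
			return ret;

		/*
		 * the update path now finds an already-traversed path at
		 * iter->pos when it attempts extent merging:
		 */
		return bch2_trans_update(trans, iter, k, 0);
	}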