From db92f2ea5ed576748b538d15446cebb65bb8d31f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 7 Sep 2021 15:34:16 -0400 Subject: [PATCH] bcachefs: Optimize btree lookups in write path This patch significantly reduces the number of btree lookups required in the extent update path. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_iter.c | 8 +++++++- fs/bcachefs/btree_iter.h | 1 + fs/bcachefs/btree_update_leaf.c | 9 ++++++++- fs/bcachefs/io.c | 10 ++++++++++ 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 3aa2777e40a5b..d2ee6e9aa3701 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -1805,6 +1805,12 @@ hole: /* Btree iterators: */ +int __must_check +__bch2_btree_iter_traverse(struct btree_iter *iter) +{ + return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); +} + int __must_check bch2_btree_iter_traverse(struct btree_iter *iter) { @@ -2416,7 +2422,7 @@ static void __bch2_trans_iter_init(struct btree_trans *trans, iter->path = bch2_path_get(trans, flags & BTREE_ITER_CACHED, btree_id, - btree_iter_search_key(iter), + iter->pos, locks_want, depth, flags & BTREE_ITER_INTENT); diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 2459291231eac..58add0bb1c815 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -221,6 +221,7 @@ void bch2_trans_downgrade(struct btree_trans *); void bch2_trans_node_add(struct btree_trans *trans, struct btree *); void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *); +int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter); int __must_check bch2_btree_iter_traverse(struct btree_iter *); struct btree *bch2_btree_iter_peek_node(struct btree_iter *); diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c index 310442fcc37f7..eb4217a3b7197 100644 --- a/fs/bcachefs/btree_update_leaf.c +++ b/fs/bcachefs/btree_update_leaf.c @@ -985,7 
+985,14 @@ next: bch2_bkey_merge(c, bkey_i_to_s(insert), k); out: if (!bkey_deleted(&insert->k)) { - bch2_btree_iter_set_pos(&iter, insert->k.p); + /* + * Rewinding iterators is expensive: get a new one and the one + * that points to the start of insert will be cloned from: + */ + bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_init(trans, &iter, btree_id, insert->k.p, + BTREE_ITER_NOT_EXTENTS| + BTREE_ITER_INTENT); ret = bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, insert, flags); } diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c index bee33258c0d80..f95ceb820faab 100644 --- a/fs/bcachefs/io.c +++ b/fs/bcachefs/io.c @@ -281,6 +281,16 @@ int bch2_extent_update(struct btree_trans *trans, s64 i_sectors_delta = 0, disk_sectors_delta = 0; int ret; + /* + * This traverses the iterator without changing iter->path->pos to + * search_key() (which is pos + 1 for extents): we want there to be a + * path already traversed at iter->pos because + * bch2_trans_extent_update() will use it to attempt extent merging + */ + ret = __bch2_btree_iter_traverse(iter); + if (ret) + return ret; + ret = bch2_extent_trim_atomic(trans, iter, k); if (ret) return ret; -- 2.30.2