From e3ecf4f56811ec538ed93fe8dbeb68c81ba74cc8 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 2 Mar 2020 13:38:19 -0500
Subject: [PATCH] bcachefs: Some btree iterator improvements

Signed-off-by: Kent Overstreet
Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_iter.c | 83 ++++++++++++++++++----------------------
 fs/bcachefs/tests.c      | 46 +++++++++++++---------
 2 files changed, 65 insertions(+), 64 deletions(-)

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 3817dcb5fa1f4..f745d228d21c9 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -35,6 +35,26 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 	return pos;
 }
 
+static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
+					      struct btree *b)
+{
+	return bkey_cmp(iter->pos, b->data->min_key) < 0;
+}
+
+static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
+					     struct btree *b)
+{
+	return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
+}
+
+static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
+					  struct btree *b)
+{
+	return iter->btree_id == b->c.btree_id &&
+		!btree_iter_pos_before_node(iter, b) &&
+		!btree_iter_pos_after_node(iter, b);
+}
+
 /* Btree node locking: */
 
 void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
@@ -399,6 +419,8 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	if (iter->uptodate > BTREE_ITER_NEED_PEEK)
 		return;
 
+	BUG_ON(!btree_iter_pos_in_node(iter, b));
+
 	bch2_btree_node_iter_verify(&l->iter, b);
 
 	/*
@@ -736,26 +758,6 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 	btree_node_unlock(iter, b->c.level + 1);
 }
 
-static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
-					      struct btree *b)
-{
-	return bkey_cmp(iter->pos, b->data->min_key) < 0;
-}
-
-static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
-					     struct btree *b)
-{
-	return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
-}
-
-static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
-					  struct btree *b)
-{
-	return iter->btree_id == b->c.btree_id &&
-		!btree_iter_pos_before_node(iter, b) &&
-		!btree_iter_pos_after_node(iter, b);
-}
-
 static inline void __btree_iter_init(struct btree_iter *iter,
 				     unsigned level)
 {
@@ -1373,6 +1375,10 @@ static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
 	return true;
 }
 
+/**
+ * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
+ * it currently points to
+ */
 static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
 {
 	struct btree_iter_level *l = &iter->l[0];
@@ -1409,7 +1415,8 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
 
 	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
 
-	if (iter->uptodate == BTREE_ITER_UPTODATE)
+	if (iter->uptodate == BTREE_ITER_UPTODATE &&
+	    !bkey_deleted(&iter->k))
 		return btree_iter_peek_uptodate(iter);
 
 	while (1) {
@@ -1503,7 +1510,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 
 	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
 
-	if (iter->uptodate == BTREE_ITER_UPTODATE)
+	if (iter->uptodate == BTREE_ITER_UPTODATE &&
+	    !bkey_deleted(&iter->k))
 		return btree_iter_peek_uptodate(iter);
 
 	while (1) {
@@ -1655,33 +1663,15 @@ __bch2_btree_iter_peek_slot(struct btree_iter *iter)
 {
 	struct btree_iter_level *l = &iter->l[0];
 	struct bkey_s_c k;
-	int ret;
 
 	if (iter->flags & BTREE_ITER_IS_EXTENTS)
 		return __bch2_btree_iter_peek_slot_extents(iter);
 
-recheck:
-	while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
-	       bkey_deleted(k.k) &&
-	       bkey_cmp(k.k->p, iter->pos) == 0)
-		bch2_btree_node_iter_advance(&l->iter, l->b);
+	k = __btree_iter_peek_all(iter, l, &iter->k);
 
-	/*
-	 * If we got to the end of the node, check if we need to traverse to the
-	 * next node:
-	 */
-	if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
-		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
-		ret = bch2_btree_iter_traverse(iter);
-		if (unlikely(ret))
-			return bkey_s_c_err(ret);
+	EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
 
-		goto recheck;
-	}
-
-	if (!k.k ||
-	    bkey_deleted(k.k) ||
-	    bkey_cmp(iter->pos, k.k->p)) {
+	if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
 		/* hole */
 		bkey_init(&iter->k);
 		iter->k.p = iter->pos;
@@ -1713,8 +1703,12 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 {
 	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
 
+	/* XXX directly setting iter->pos is wrong */
 	iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
 
+	if (unlikely(btree_iter_pos_after_node(iter, iter->l[0].b)))
+		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+
 	if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
 		/*
 		 * XXX: when we just need to relock we should be able to avoid
@@ -1726,8 +1720,7 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 		return bch2_btree_iter_peek_slot(iter);
 	}
 
-	if (!bkey_deleted(&iter->k))
-		bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b);
+	btree_iter_advance_to_pos(iter, &iter->l[0], -1);
 
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 876d64bfca200..6aa31369ecc9a 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -18,7 +18,7 @@ static void delete_test_keys(struct bch_fs *c)
 			      NULL);
 	BUG_ON(ret);
 
-	ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
+	ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
 				      POS(0, 0), POS(0, U64_MAX),
 				      NULL);
 	BUG_ON(ret);
@@ -37,7 +37,7 @@ static void test_delete(struct bch_fs *c, u64 nr)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
 				   BTREE_ITER_INTENT);
 
 	ret = bch2_btree_iter_traverse(iter);
@@ -69,7 +69,7 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
 				   BTREE_ITER_INTENT);
 
 	ret = bch2_btree_iter_traverse(iter);
@@ -107,7 +107,7 @@ static void test_iterate(struct bch_fs *c, u64 nr)
 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = i;
 
-		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
+		ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
 					NULL, NULL, 0);
 		BUG_ON(ret);
 	}
@@ -116,9 +116,13 @@ static void test_iterate(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
-			   POS_MIN, 0, k, ret)
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
+			   POS_MIN, 0, k, ret) {
+		if (k.k->p.inode)
+			break;
+
 		BUG_ON(k.k->p.offset != i++);
+	}
 
 	BUG_ON(i != nr);
 
@@ -202,7 +206,7 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = i * 2;
 
-		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
+		ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
 					NULL, NULL, 0);
 		BUG_ON(ret);
 	}
@@ -211,8 +215,11 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
 			   0, k, ret) {
+		if (k.k->p.inode)
+			break;
+
 		BUG_ON(k.k->p.offset != i);
 		i += 2;
 	}
@@ -224,11 +231,12 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
 			   BTREE_ITER_SLOTS, k, ret) {
+		BUG_ON(k.k->p.offset != i);
 		BUG_ON(bkey_deleted(k.k) != (i & 1));
-		BUG_ON(k.k->p.offset != i++);
+		i++;
 		if (i == nr * 2)
 			break;
 	}
@@ -307,7 +315,7 @@ static void test_peek_end(struct bch_fs *c, u64 nr)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
 
 	k = bch2_btree_iter_peek(iter);
 	BUG_ON(k.k);
@@ -421,7 +429,7 @@ static void rand_insert(struct bch_fs *c, u64 nr)
 		k.k.p.offset = test_rand();
 
 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
-			__bch2_btree_insert(&trans, BTREE_ID_DIRENTS, &k.k_i));
+			__bch2_btree_insert(&trans, BTREE_ID_XATTRS, &k.k_i));
 
 		BUG_ON(ret);
 	}
@@ -439,7 +447,7 @@ static void rand_lookup(struct bch_fs *c, u64 nr)
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for (i = 0; i < nr; i++) {
-		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
+		iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
 					   POS(0, test_rand()), 0);
 
 		k = bch2_btree_iter_peek(iter);
@@ -460,7 +468,7 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for (i = 0; i < nr; i++) {
-		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
+		iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
 					   POS(0, test_rand()), 0);
 
 		k = bch2_btree_iter_peek(iter);
@@ -490,7 +498,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
 	struct bkey_s_c k;
 	int ret = 0;
 
-	iter = bch2_trans_get_iter(trans, BTREE_ID_DIRENTS, pos,
+	iter = bch2_trans_get_iter(trans, BTREE_ID_XATTRS, pos,
 				   BTREE_ITER_INTENT);
 	ret = PTR_ERR_OR_ZERO(iter);
 	if (ret)
@@ -542,7 +550,7 @@ static void seq_insert(struct bch_fs *c, u64 nr)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
 		insert.k.p = iter->pos;
 
@@ -566,7 +574,7 @@ static void seq_lookup(struct bch_fs *c, u64 nr)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
 		;
 	bch2_trans_exit(&trans);
 }
@@ -580,7 +588,7 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
 			   BTREE_ITER_INTENT, k, ret) {
 		struct bkey_i_cookie u;
 
@@ -598,7 +606,7 @@ static void seq_delete(struct bch_fs *c, u64 nr)
 {
 	int ret;
 
-	ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
+	ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
 				      POS(0, 0), POS(0, U64_MAX),
 				      NULL);
 	BUG_ON(ret);
-- 
2.30.2
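
A note for readers following along: the three btree_iter_pos_*_node()
predicates this patch hoists to the top of btree_iter.c can be modeled in
a few lines of stand-alone C. The sketch below is a simplified model, not
the kernel code: struct bpos is cut down to an (inode, offset) pair, the
btree node to the closed key range [min_key, max_key] it covers, and the
real helpers' btree_id check and btree_iter_search_key() adjustment are
omitted. All names here (struct node, pos_before_node, etc.) are
illustrative stand-ins.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct bpos: keys sort by (inode, offset). */
struct bpos {
	uint64_t	inode;
	uint64_t	offset;
};

/* Simplified stand-in for a btree node: the key range it covers. */
struct node {
	struct bpos	min_key;
	struct bpos	max_key;
};

static int bkey_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	return 0;
}

/* Mirrors btree_iter_pos_before_node(): pos sorts before the node's range */
static bool pos_before_node(struct bpos pos, const struct node *b)
{
	return bkey_cmp(pos, b->min_key) < 0;
}

/* Mirrors btree_iter_pos_after_node(): the node's last key sorts before pos */
static bool pos_after_node(struct bpos pos, const struct node *b)
{
	return bkey_cmp(b->max_key, pos) < 0;
}

/* Mirrors btree_iter_pos_in_node(), minus the btree_id check */
static bool pos_in_node(struct bpos pos, const struct node *b)
{
	return !pos_before_node(pos, b) && !pos_after_node(pos, b);
}

int main(void)
{
	struct node b = {
		.min_key = { .inode = 1, .offset = 100 },
		.max_key = { .inode = 1, .offset = 200 },
	};

	/* Both endpoints of the node's range are inside it */
	assert(pos_before_node((struct bpos){ .inode = 1, .offset = 99 }, &b));
	assert(pos_in_node((struct bpos){ .inode = 1, .offset = 100 }, &b));
	assert(pos_in_node((struct bpos){ .inode = 1, .offset = 200 }, &b));
	assert(pos_after_node((struct bpos){ .inode = 1, .offset = 201 }, &b));
	assert(pos_after_node((struct bpos){ .inode = 2, .offset = 0 }, &b));

	printf("node-membership checks behave as expected\n");
	return 0;
}

This is the invariant the new BUG_ON(!btree_iter_pos_in_node(iter, b)) in
__bch2_btree_iter_verify() asserts: an iterator that claims to be uptodate
must actually be positioned within the node it points into, which is also
why bch2_btree_iter_next_slot() above marks the iterator
BTREE_ITER_NEED_TRAVERSE as soon as the advanced position falls past the
end of the current leaf.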