BCH_DEBUG_PARAM(expensive_debug_checks, \
"Enables various runtime debugging checks that " \
"significantly affect performance") \
+ BCH_DEBUG_PARAM(debug_check_iterators, \
+ "Enables extra verification for btree iterators") \
BCH_DEBUG_PARAM(debug_check_bkeys, \
"Run bkey_debugcheck (primarily checking GC/allocation "\
"information) when iterating over keys") \
k = p;
}
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+ if (btree_keys_expensive_checks(b)) {
BUG_ON(ret >= orig_k);
for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
struct btree *b)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
- bch2_btree_node_iter_verify(iter, b);
- bch2_btree_node_iter_next_check(iter, b);
-#endif
+ /*
+ * Iterator verification is now gated on the runtime
+ * expensive_debug_checks knob instead of the compile-time
+ * CONFIG_BCACHEFS_DEBUG ifdef, so the (costly) checks can be
+ * toggled without rebuilding.
+ */
+ if (btree_keys_expensive_checks(b)) {
+ bch2_btree_node_iter_verify(iter, b);
+ bch2_btree_node_iter_next_check(iter, b);
+ }
+
__bch2_btree_node_iter_advance(iter, b);
}
iter->data[0].k = __btree_node_key_to_offset(b, prev);
iter->data[0].end = end;
out:
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+ if (btree_keys_expensive_checks(b)) {
struct btree_node_iter iter2 = *iter;
if (prev)
struct btree_node_iter tmp = l->iter;
struct bkey_packed *k;
+ if (!debug_check_iterators(iter->trans->c))
+ return;
+
if (iter->uptodate > BTREE_ITER_NEED_PEEK)
return;
{
struct btree_iter *linked;
+ if (!debug_check_iterators(iter->trans->c))
+ return;
+
trans_for_each_iter_with_node(iter->trans, b, linked)
__bch2_btree_iter_verify(linked, b);
}
struct bkey_packed *,
bool);
-static void verify_extent_nonoverlapping(struct btree *b,
+static void verify_extent_nonoverlapping(struct bch_fs *c,
+ struct btree *b,
struct btree_node_iter *_iter,
struct bkey_i *insert)
{
struct bkey_packed *k;
struct bkey uk;
+ if (!expensive_debug_checks(c))
+ return;
+
iter = *_iter;
k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
BUG_ON(k &&
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
- verify_extent_nonoverlapping(l->b, &l->iter, insert);
+ verify_extent_nonoverlapping(c, l->b, &l->iter, insert);
node_iter = l->iter;
k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);