bcachefs: move some checks to expensive_debug_checks
author		Kent Overstreet <kent.overstreet@gmail.com>
		Thu, 28 Mar 2019 05:51:47 +0000 (01:51 -0400)
committer	Kent Overstreet <kent.overstreet@linux.dev>
		Sun, 22 Oct 2023 21:08:19 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/bset.c
fs/bcachefs/btree_iter.c
fs/bcachefs/extents.c

diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index a815d7a488a6b5662b8be25cdf03c5fd70c8a6f2..a2d8e37e7eb6a9612ea22e20a646f4076a0c3233 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -257,6 +257,8 @@ do {                                                                        \
        BCH_DEBUG_PARAM(expensive_debug_checks,                         \
                "Enables various runtime debugging checks that "        \
                "significantly affect performance")                     \
+       BCH_DEBUG_PARAM(debug_check_iterators,                          \
+               "Enables extra verification for btree iterators")       \
        BCH_DEBUG_PARAM(debug_check_bkeys,                              \
                "Run bkey_debugcheck (primarily checking GC/allocation "\
                "information) when iterating over keys")                \
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index ac84aac4a26324d1552c55b452d612ed74532a34..68442a26756fd95d3c51e9bef752e48cc230b378 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -1023,7 +1023,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
                k = p;
        }
 
-       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+       if (btree_keys_expensive_checks(b)) {
                BUG_ON(ret >= orig_k);
 
                for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
@@ -1644,10 +1644,11 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
                                  struct btree *b)
 {
-#ifdef CONFIG_BCACHEFS_DEBUG
-       bch2_btree_node_iter_verify(iter, b);
-       bch2_btree_node_iter_next_check(iter, b);
-#endif
+       if (btree_keys_expensive_checks(b)) {
+               bch2_btree_node_iter_verify(iter, b);
+               bch2_btree_node_iter_next_check(iter, b);
+       }
+
        __bch2_btree_node_iter_advance(iter, b);
 }
 
@@ -1710,7 +1711,7 @@ found:
        iter->data[0].k = __btree_node_key_to_offset(b, prev);
        iter->data[0].end = end;
 out:
-       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+       if (btree_keys_expensive_checks(b)) {
                struct btree_node_iter iter2 = *iter;
 
                if (prev)
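
The bset.c hunks above replace IS_ENABLED(CONFIG_BCACHEFS_DEBUG) with btree_keys_expensive_checks(b). One plausible shape for such a gate, shown here as a hedged sketch (btree_keys_expensive_checks_sketch and btree_sketch are invented; the real helper lives in bset.h and may fetch the flag differently), still costs nothing in non-debug builds because the constant-false branch is optimized away:

/* Hedged sketch: invented names, not the helper bcachefs actually uses. */
#include <stdbool.h>

struct btree_sketch {
	bool expensive_debug_checks;	/* assumed to mirror the fs-wide knob */
};

static inline bool btree_keys_expensive_checks_sketch(const struct btree_sketch *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	return b->expensive_debug_checks;	/* runtime toggle in debug builds */
#else
	return false;				/* constant: checks compile away */
#endif
}
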
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index ad7858d77a58d8286e008a59a7ee54348b8032da..bc9d8444e2209ba9db190f94e0e85e6e85f54b43 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -429,6 +429,9 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
        struct btree_node_iter tmp = l->iter;
        struct bkey_packed *k;
 
+       if (!debug_check_iterators(iter->trans->c))
+               return;
+
        if (iter->uptodate > BTREE_ITER_NEED_PEEK)
                return;
 
@@ -475,6 +478,9 @@ void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
 
+       if (!debug_check_iterators(iter->trans->c))
+               return;
+
        trans_for_each_iter_with_node(iter->trans, b, linked)
                __bch2_btree_iter_verify(linked, b);
 }
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index ce46417b07a01b80e0c2840a606c059b7b62b7b6..2e7c3e82f03bfa47e9cae108d38c8eec598c5a3a 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -788,7 +788,8 @@ static bool bch2_extent_merge_inline(struct bch_fs *,
                                     struct bkey_packed *,
                                     bool);
 
-static void verify_extent_nonoverlapping(struct btree *b,
+static void verify_extent_nonoverlapping(struct bch_fs *c,
+                                        struct btree *b,
                                         struct btree_node_iter *_iter,
                                         struct bkey_i *insert)
 {
@@ -797,6 +798,9 @@ static void verify_extent_nonoverlapping(struct btree *b,
        struct bkey_packed *k;
        struct bkey uk;
 
+       if (!expensive_debug_checks(c))
+               return;
+
        iter = *_iter;
        k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
        BUG_ON(k &&
@@ -847,7 +851,7 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
        BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
 
        EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
-       verify_extent_nonoverlapping(l->b, &l->iter, insert);
+       verify_extent_nonoverlapping(c, l->b, &l->iter, insert);
 
        node_iter = l->iter;
        k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);
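
Finally, the reason knobs like debug_check_iterators and expensive_debug_checks can be adjusted at runtime at all is that debug parameters of this kind are typically also expanded into module parameters. The following is a loose sketch of that wiring (the sketch_ names and the list macro are invented, not copied from bcachefs's module init code):

/* Sketch of module-parameter wiring; bcachefs's real expansion may differ. */
#include <linux/module.h>

#define DEBUG_PARAMS_SKETCH()			\
	DEBUG_PARAM(expensive_debug_checks)	\
	DEBUG_PARAM(debug_check_iterators)

#define DEBUG_PARAM(name)					\
	static bool sketch_##name;				\
	module_param_named(name, sketch_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, "sketch: runtime debug toggle");
DEBUG_PARAMS_SKETCH()
#undef DEBUG_PARAM

Wired like this, a writable parameter would surface under /sys/module/<module>/parameters/ and could be toggled on a running system without a rebuild.
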