bcachefs: Add persistent counters for all tracepoints
author Kent Overstreet <kent.overstreet@linux.dev>
Sat, 27 Aug 2022 16:48:36 +0000 (12:48 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:39 +0000 (17:09 -0400)
Also, do some reorganizing/renaming, convert atomic counters in bch_fs
to persistent counters, and add a few missing counters.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
23 files changed:
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_foreground.c
fs/bcachefs/bcachefs.h
fs/bcachefs/bcachefs_format.h
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_locking.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/data_update.c
fs/bcachefs/io.c
fs/bcachefs/journal.c
fs/bcachefs/journal_io.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/move.c
fs/bcachefs/movinggc.c
fs/bcachefs/super-io.c
fs/bcachefs/sysfs.c
fs/bcachefs/trace.h

index 15c3c9a2da7b9155990f8ce609f3daa585b0e1df..ffcfb9f1916e9d75ccf3cfbe43a70ef5ecfecb94 100644 (file)
@@ -1134,8 +1134,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
        if (ret)
                goto out;
 
-       trace_invalidate_bucket(c, bucket.inode, bucket.offset, cached_sectors);
-       this_cpu_inc(c->counters[BCH_COUNTER_bucket_invalidate]);
+       trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
        --*nr_to_invalidate;
 out:
        bch2_trans_iter_exit(trans, &alloc_iter);
index bbe74a05a7a2779cc638116f710e1d0a507f4782..f60fe159916e19bfd22761621507e645b44cd0d3 100644 (file)
@@ -584,32 +584,32 @@ err:
        if (!ob)
                ob = ERR_PTR(-BCH_ERR_no_buckets_found);
 
-       if (!IS_ERR(ob)) {
-               trace_bucket_alloc(ca, bch2_alloc_reserves[reserve],
-                                  usage.d[BCH_DATA_free].buckets,
-                                  avail,
-                                  bch2_copygc_wait_amount(c),
-                                  c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-                                  buckets_seen,
-                                  skipped_open,
-                                  skipped_need_journal_commit,
-                                  skipped_nouse,
-                                  cl == NULL,
-                                  "");
-       } else {
-               trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve],
-                                  usage.d[BCH_DATA_free].buckets,
-                                  avail,
-                                  bch2_copygc_wait_amount(c),
-                                  c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-                                  buckets_seen,
-                                  skipped_open,
-                                  skipped_need_journal_commit,
-                                  skipped_nouse,
-                                  cl == NULL,
-                                  bch2_err_str(PTR_ERR(ob)));
-               atomic_long_inc(&c->bucket_alloc_fail);
-       }
+       if (!IS_ERR(ob))
+               trace_and_count(c, bucket_alloc, ca,
+                               bch2_alloc_reserves[reserve],
+                               usage.d[BCH_DATA_free].buckets,
+                               avail,
+                               bch2_copygc_wait_amount(c),
+                               c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+                               buckets_seen,
+                               skipped_open,
+                               skipped_need_journal_commit,
+                               skipped_nouse,
+                               cl == NULL,
+                               "");
+       else
+               trace_and_count(c, bucket_alloc_fail, ca,
+                               bch2_alloc_reserves[reserve],
+                               usage.d[BCH_DATA_free].buckets,
+                               avail,
+                               bch2_copygc_wait_amount(c),
+                               c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+                               buckets_seen,
+                               skipped_open,
+                               skipped_need_journal_commit,
+                               skipped_nouse,
+                               cl == NULL,
+                               bch2_err_str(PTR_ERR(ob)));
 
        return ob;
 }
index 9e6c10dfa443f457ffcc86e244b3196b7fe063b0..bca61af7165226bc218e7be8a9667b3f2a55a1ef 100644 (file)
 #define dynamic_fault(...)             0
 #define race_fault(...)                        0
 
+#define trace_and_count(_c, _name, ...)                                        \
+do {                                                                   \
+       this_cpu_inc((_c)->counters[BCH_COUNTER_##_name]);              \
+       trace_##_name(__VA_ARGS__);                                     \
+} while (0)
+
 #define bch2_fs_init_fault(name)                                       \
        dynamic_fault("bcachefs:bch_fs_init:" name)
 #define bch2_meta_read_fault(name)                                     \
@@ -916,12 +922,6 @@ mempool_t          bio_bounce_pages;
 
        u64                     last_bucket_seq_cleanup;
 
-       /* TODO rewrite as counters - The rest of this all shows up in sysfs */
-       atomic_long_t           read_realloc_races;
-       atomic_long_t           extent_migrate_done;
-       atomic_long_t           extent_migrate_raced;
-       atomic_long_t           bucket_alloc_fail;
-
        u64                     counters_on_mount[BCH_COUNTER_NR];
        u64 __percpu            *counters;
 
index b9d614f608b5b4c7d3f84287df2139ca9c9423d9..0e80fe2568f26741abdca19c4d0cfa10a49bc18c 100644 (file)
@@ -1326,12 +1326,81 @@ struct bch_sb_field_disk_groups {
 
 /* BCH_SB_FIELD_counters */
 
-#define BCH_PERSISTENT_COUNTERS()                      \
-       x(io_read,              0)                      \
-       x(io_write,             1)                      \
-       x(io_move,              2)                      \
-       x(bucket_invalidate,    3)                      \
-       x(bucket_discard,       4)
+#define BCH_PERSISTENT_COUNTERS()                              \
+       x(io_read,                                      0)      \
+       x(io_write,                                     1)      \
+       x(io_move,                                      2)      \
+       x(bucket_invalidate,                            3)      \
+       x(bucket_discard,                               4)      \
+       x(bucket_alloc,                                 5)      \
+       x(bucket_alloc_fail,                            6)      \
+       x(btree_cache_scan,                             7)      \
+       x(btree_cache_reap,                             8)      \
+       x(btree_cache_cannibalize,                      9)      \
+       x(btree_cache_cannibalize_lock,                 10)     \
+       x(btree_cache_cannibalize_lock_fail,            11)     \
+       x(btree_cache_cannibalize_unlock,               12)     \
+       x(btree_node_write,                             13)     \
+       x(btree_node_read,                              14)     \
+       x(btree_node_compact,                           15)     \
+       x(btree_node_merge,                             16)     \
+       x(btree_node_split,                             17)     \
+       x(btree_node_rewrite,                           18)     \
+       x(btree_node_alloc,                             19)     \
+       x(btree_node_free,                              20)     \
+       x(btree_node_set_root,                          21)     \
+       x(btree_path_relock_fail,                       22)     \
+       x(btree_path_upgrade_fail,                      23)     \
+       x(btree_reserve_get_fail,                       24)     \
+       x(journal_entry_full,                           25)     \
+       x(journal_full,                                 26)     \
+       x(journal_reclaim_finish,                       27)     \
+       x(journal_reclaim_start,                        28)     \
+       x(journal_write,                                29)     \
+       x(read_promote,                                 30)     \
+       x(read_bounce,                                  31)     \
+       x(read_split,                                   33)     \
+       x(read_retry,                                   32)     \
+       x(read_reuse_race,                              34)     \
+       x(move_extent_read,                             35)     \
+       x(move_extent_write,                            36)     \
+       x(move_extent_finish,                           37)     \
+       x(move_extent_fail,                             38)     \
+       x(move_extent_alloc_mem_fail,                   39)     \
+       x(copygc,                                       40)     \
+       x(copygc_wait,                                  41)     \
+       x(gc_gens_end,                                  42)     \
+       x(gc_gens_start,                                43)     \
+       x(trans_blocked_journal_reclaim,                44)     \
+       x(trans_restart_btree_node_reused,              45)     \
+       x(trans_restart_btree_node_split,               46)     \
+       x(trans_restart_fault_inject,                   47)     \
+       x(trans_restart_iter_upgrade,                   48)     \
+       x(trans_restart_journal_preres_get,             49)     \
+       x(trans_restart_journal_reclaim,                50)     \
+       x(trans_restart_journal_res_get,                51)     \
+       x(trans_restart_key_cache_key_realloced,        52)     \
+       x(trans_restart_key_cache_raced,                53)     \
+       x(trans_restart_mark_replicas,                  54)     \
+       x(trans_restart_mem_realloced,                  55)     \
+       x(trans_restart_memory_allocation_failure,      56)     \
+       x(trans_restart_relock,                         57)     \
+       x(trans_restart_relock_after_fill,              58)     \
+       x(trans_restart_relock_key_cache_fill,          59)     \
+       x(trans_restart_relock_next_node,               60)     \
+       x(trans_restart_relock_parent_for_fill,         61)     \
+       x(trans_restart_relock_path,                    62)     \
+       x(trans_restart_relock_path_intent,             63)     \
+       x(trans_restart_too_many_iters,                 64)     \
+       x(trans_restart_traverse,                       65)     \
+       x(trans_restart_upgrade,                        66)     \
+       x(trans_restart_would_deadlock,                 67)     \
+       x(trans_restart_would_deadlock_write,           68)     \
+       x(trans_restart_injected,                       69)     \
+       x(trans_restart_key_cache_upgrade,              70)     \
+       x(trans_traverse_all,                           71)     \
+       x(transaction_commit,                           72)     \
+       x(write_super,                                  73)
 
 enum bch_persistent_counters {
 #define x(t, n, ...) BCH_COUNTER_##t,
index 7ffa88b742364fe9b14b7ae4af725b6fd4370500..e09fbf36ebc26587501cd43ebd27f59427982385 100644 (file)
@@ -253,7 +253,7 @@ wait_on_io:
        }
 out:
        if (b->hash_val && !ret)
-               trace_btree_node_reap(c, b);
+               trace_and_count(c, btree_cache_reap, c, b);
        return ret;
 out_unlock:
        six_unlock_write(&b->c.lock);
@@ -377,7 +377,7 @@ out:
        ret = freed;
        memalloc_nofs_restore(flags);
 out_norestore:
-       trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
+       trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
        return ret;
 }
 
@@ -504,7 +504,7 @@ void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
        struct btree_cache *bc = &c->btree_cache;
 
        if (bc->alloc_lock == current) {
-               trace_btree_node_cannibalize_unlock(c);
+               trace_and_count(c, btree_cache_cannibalize_unlock, c);
                bc->alloc_lock = NULL;
                closure_wake_up(&bc->alloc_wait);
        }
@@ -520,7 +520,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
                goto success;
 
        if (!cl) {
-               trace_btree_node_cannibalize_lock_fail(c);
+               trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
                return -ENOMEM;
        }
 
@@ -534,11 +534,11 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
                goto success;
        }
 
-       trace_btree_node_cannibalize_lock_fail(c);
+       trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
        return -EAGAIN;
 
 success:
-       trace_btree_node_cannibalize_lock(c);
+       trace_and_count(c, btree_cache_cannibalize_lock, c);
        return 0;
 }
 
@@ -662,7 +662,7 @@ err_locked:
 
                mutex_unlock(&bc->lock);
 
-               trace_btree_node_cannibalize(c);
+               trace_and_count(c, btree_cache_cannibalize, c);
                goto out;
        }
 
@@ -691,7 +691,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
         * been freed:
         */
        if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
-               trace_trans_restart_relock_parent_for_fill(trans, _THIS_IP_, path);
+               trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
        }
 
@@ -699,7 +699,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 
        if (trans && b == ERR_PTR(-ENOMEM)) {
                trans->memory_allocation_failure = true;
-               trace_trans_restart_memory_allocation_failure(trans, _THIS_IP_, path);
+               trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
        }
 
@@ -748,7 +748,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 
        if (!six_relock_type(&b->c.lock, lock_type, seq)) {
                if (trans)
-                       trace_trans_restart_relock_after_fill(trans, _THIS_IP_, path);
+                       trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
        }
 
@@ -903,7 +903,7 @@ lock_node:
                        if (bch2_btree_node_relock(trans, path, level + 1))
                                goto retry;
 
-                       trace_trans_restart_btree_node_reused(trans, trace_ip, path);
+                       trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
                        return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
                }
        }
index 4ab59880781a5af305e25e041d662284b5aad255..239eda57bf02680ffb667c684480541b0339b7d8 100644 (file)
@@ -1931,7 +1931,7 @@ int bch2_gc_gens(struct bch_fs *c)
        if (!mutex_trylock(&c->gc_gens_lock))
                return 0;
 
-       trace_gc_gens_start(c);
+       trace_and_count(c, gc_gens_start, c);
        down_read(&c->gc_lock);
        bch2_trans_init(&trans, c, 0, 0);
 
@@ -1992,7 +1992,7 @@ int bch2_gc_gens(struct bch_fs *c)
        c->gc_count++;
 
        bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
-       trace_gc_gens_end(c);
+       trace_and_count(c, gc_gens_end, c);
 err:
        for_each_member_device(ca, c, i) {
                kvfree(ca->oldest_gen);
index bd74bd31dd1f77bff439a6a1bc7236e1c94b1bf3..b3dc8b43298e7c7b01482508467bf92e5a0635c0 100644 (file)
@@ -1485,7 +1485,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
        struct bio *bio;
        int ret;
 
-       trace_btree_read(c, b);
+       trace_and_count(c, btree_node_read, c, b);
 
        if (bch2_verify_all_btree_replicas &&
            !btree_node_read_all_replicas(c, b, sync))
@@ -1974,7 +1974,7 @@ do_write:
            c->opts.nochanges)
                goto err;
 
-       trace_btree_write(b, bytes_to_write, sectors_to_write);
+       trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
 
        wbio = container_of(bio_alloc_bioset(NULL,
                                buf_pages(data, sectors_to_write << 9),
index 99422e29c704899377afc721f5c00db59bba89f7..e76907af09f19da0fdbf337ad30cf690859b20f0 100644 (file)
@@ -1072,7 +1072,7 @@ err:
 
        trans->in_traverse_all = false;
 
-       trace_trans_traverse_all(trans, trace_ip);
+       trace_and_count(c, trans_traverse_all, trans, trace_ip);
        return ret;
 }
 
@@ -1209,7 +1209,7 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
                u64 max = ~(~0ULL << restart_probability_bits);
 
                if (!get_random_u32_below(max)) {
-                       trace_transaction_restart_injected(trans, _RET_IP_);
+                       trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
                }
        }
@@ -1728,7 +1728,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
                path->l[path->level].b          = ERR_PTR(-BCH_ERR_no_btree_node_relock);
                path->l[path->level + 1].b      = ERR_PTR(-BCH_ERR_no_btree_node_relock);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-               trace_trans_restart_relock_next_node(trans, _THIS_IP_, path);
+               trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
                ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                goto err;
        }
@@ -2773,7 +2773,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
                trans->mem_bytes = new_bytes;
 
                if (old_bytes) {
-                       trace_trans_restart_mem_realloced(trans, _RET_IP_, new_bytes);
+                       trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
                        return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
                }
        }
index 1081ea753be6caf39353d1689705ac6e57214a33..bdc703324b9af6e58fddbc6b22c081627134e8a9 100644 (file)
@@ -388,7 +388,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
        if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
-               trace_trans_restart_too_many_iters(trans, _THIS_IP_);
+               trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
        }
 
index cf41926b7f8e1385a60d645e325b894f5a35cedc..127cb6edaff5a0a2cc8479019eedc5861b803492 100644 (file)
@@ -291,7 +291,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
        k = bch2_btree_path_peek_slot(path, &u);
 
        if (!bch2_btree_node_relock(trans, ck_path, 0)) {
-               trace_trans_restart_relock_key_cache_fill(trans, _THIS_IP_, ck_path);
+               trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
                ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
                goto err;
        }
@@ -414,7 +414,7 @@ fill:
                 */
                if (!path->locks_want &&
                    !__bch2_btree_path_upgrade(trans, path, 1)) {
-                       trace_transaction_restart_key_cache_upgrade(trans, _THIS_IP_);
+                       trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
                        ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
                        goto err;
                }
index 76d99c694948dcb6547105186fcb4e02aa6f7351..301311763d59142a85498e911172f9e4b55bc4b4 100644 (file)
@@ -152,7 +152,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
        return btree_node_lock_type(trans, path, b, pos, level,
                                    type, should_sleep_fn, p);
 deadlock:
-       trace_trans_restart_would_deadlock(trans, ip, reason, linked, path, &pos);
+       trace_and_count(trans->c, trans_restart_would_deadlock, trans, ip, reason, linked, path, &pos);
        return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
 
@@ -218,7 +218,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
                return true;
        }
 fail:
-       trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
+       trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
 }
 
@@ -262,7 +262,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
                goto success;
        }
 
-       trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
+       trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
 success:
        mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
@@ -285,7 +285,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(trans, path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-                       trace_trans_restart_relock_path_intent(trans, _RET_IP_, path);
+                       trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
                }
        }
@@ -304,7 +304,7 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
 {
        if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
-               trace_trans_restart_relock_path(trans, trace_ip, path);
+               trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
        }
 
@@ -416,7 +416,7 @@ int bch2_trans_relock(struct btree_trans *trans)
        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    bch2_btree_path_relock(trans, path, _RET_IP_)) {
-                       trace_trans_restart_relock(trans, _RET_IP_, path);
+                       trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
                        BUG_ON(!trans->restarted);
                        return -BCH_ERR_transaction_restart_relock;
                }
index dd9405c631f5f3297791dcc73e1a9e1680cca928..1f5b98a3d0a2db4da9502172d8c091db3efc968b 100644 (file)
@@ -143,7 +143,7 @@ bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
 
 static void __btree_node_free(struct bch_fs *c, struct btree *b)
 {
-       trace_btree_node_free(c, b);
+       trace_and_count(c, btree_node_free, c, b);
 
        BUG_ON(btree_node_dirty(b));
        BUG_ON(btree_node_need_write(b));
@@ -305,7 +305,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
        ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
        BUG_ON(ret);
 
-       trace_btree_node_alloc(c, b);
+       trace_and_count(c, btree_node_alloc, c, b);
        return b;
 }
 
@@ -995,7 +995,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
                nr_nodes[1] += 1;
 
        if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
-               trace_trans_restart_iter_upgrade(trans, _RET_IP_, path);
+               trace_and_count(c, trans_restart_iter_upgrade, trans, _RET_IP_, path);
                ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
                return ERR_PTR(ret);
        }
@@ -1058,7 +1058,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
                                              BTREE_UPDATE_JOURNAL_RES,
                                              journal_flags);
                if (ret) {
-                       trace_trans_restart_journal_preres_get(trans, _RET_IP_, journal_flags);
+                       trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags);
                        ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
                        goto err;
                }
@@ -1091,8 +1091,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
        }
 
        if (ret) {
-               trace_btree_reserve_get_fail(trans->fn, _RET_IP_,
-                                            nr_nodes[0] + nr_nodes[1]);
+               trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]);
                goto err;
        }
 
@@ -1147,7 +1146,7 @@ static void bch2_btree_set_root(struct btree_update *as,
        struct bch_fs *c = as->c;
        struct btree *old;
 
-       trace_btree_set_root(c, b);
+       trace_and_count(c, btree_node_set_root, c, b);
        BUG_ON(!b->written);
 
        old = btree_node_root(c, b);
@@ -1434,7 +1433,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
                btree_split_insert_keys(as, trans, path, n1, keys);
 
        if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
-               trace_btree_split(c, b);
+               trace_and_count(c, btree_node_split, c, b);
 
                n2 = __btree_split_node(as, n1);
 
@@ -1468,7 +1467,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
                        bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
                }
        } else {
-               trace_btree_compact(c, b);
+               trace_and_count(c, btree_node_compact, c, b);
 
                bch2_btree_build_aux_trees(n1);
                six_unlock_write(&n1->c.lock);
@@ -1737,7 +1736,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
        if (ret)
                goto err;
 
-       trace_btree_merge(c, b);
+       trace_and_count(c, btree_node_merge, c, b);
 
        bch2_btree_interior_update_will_free_node(as, b);
        bch2_btree_interior_update_will_free_node(as, m);
@@ -1829,7 +1828,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
        bch2_btree_build_aux_trees(n);
        six_unlock_write(&n->c.lock);
 
-       trace_btree_rewrite(c, b);
+       trace_and_count(c, btree_node_rewrite, c, b);
 
        bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
index e3501623931ac43ca81cb67639236bfab4efc89f..732d09d4504124589d159c1674743d0d034ad9b7 100644 (file)
@@ -285,7 +285,7 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
 
        ret = bch2_trans_relock(trans);
        if (ret) {
-               trace_trans_restart_journal_preres_get(trans, trace_ip, 0);
+               trace_and_count(c, trans_restart_journal_preres_get, trans, trace_ip, 0);
                return ret;
        }
 
@@ -375,7 +375,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
         * Keys returned by peek() are no longer valid pointers, so we need a
         * transaction restart:
         */
-       trace_trans_restart_key_cache_key_realloced(trans, _RET_IP_, path, old_u64s, new_u64s);
+       trace_and_count(c, trans_restart_key_cache_key_realloced, trans, _RET_IP_, path, old_u64s, new_u64s);
        return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_key_cache_realloced);
 }
 
@@ -567,7 +567,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
        int ret;
 
        if (race_fault()) {
-               trace_trans_restart_fault_inject(trans, trace_ip);
+               trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
                return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
        }
 
@@ -842,7 +842,7 @@ fail:
                bch2_btree_node_unlock_write_inlined(trans, i->path, insert_l(i)->b);
        }
 
-       trace_trans_restart_would_deadlock_write(trans);
+       trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
        return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
 }
 
@@ -975,7 +975,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
        case BTREE_INSERT_BTREE_NODE_FULL:
                ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-                       trace_trans_restart_btree_node_split(trans, trace_ip, i->path);
+                       trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
                break;
        case BTREE_INSERT_NEED_MARK_REPLICAS:
                bch2_trans_unlock(trans);
@@ -986,7 +986,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 
                ret = bch2_trans_relock(trans);
                if (ret)
-                       trace_trans_restart_mark_replicas(trans, trace_ip);
+                       trace_and_count(c, trans_restart_mark_replicas, trans, trace_ip);
                break;
        case BTREE_INSERT_NEED_JOURNAL_RES:
                bch2_trans_unlock(trans);
@@ -1003,12 +1003,12 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 
                ret = bch2_trans_relock(trans);
                if (ret)
-                       trace_trans_restart_journal_res_get(trans, trace_ip);
+                       trace_and_count(c, trans_restart_journal_res_get, trans, trace_ip);
                break;
        case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
                bch2_trans_unlock(trans);
 
-               trace_trans_blocked_journal_reclaim(trans, trace_ip);
+               trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
 
                wait_event_freezable(c->journal.reclaim_wait,
                                     (ret = journal_reclaim_wait_done(c)));
@@ -1017,7 +1017,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 
                ret = bch2_trans_relock(trans);
                if (ret)
-                       trace_trans_restart_journal_reclaim(trans, trace_ip);
+                       trace_and_count(c, trans_restart_journal_reclaim, trans, trace_ip);
                break;
        default:
                BUG_ON(ret >= 0);
@@ -1120,7 +1120,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
                BUG_ON(!i->path->should_be_locked);
 
                if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
-                       trace_trans_restart_upgrade(trans, _RET_IP_, i->path);
+                       trace_and_count(c, trans_restart_upgrade, trans, _RET_IP_, i->path);
                        ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
                        goto out;
                }
@@ -1166,7 +1166,7 @@ retry:
        if (ret)
                goto err;
 
-       trace_transaction_commit(trans, _RET_IP_);
+       trace_and_count(c, transaction_commit, trans, _RET_IP_);
 out:
        bch2_journal_preres_put(&c->journal, &trans->journal_preres);
 
@@ -1642,7 +1642,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
                        ck = (void *) iter->key_cache_path->l[0].b;
 
                        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
-                               trace_trans_restart_key_cache_raced(trans, _RET_IP_);
+                               trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
                                return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
                        }
 
index f9eb147fe2290761b603d4894e7742b81002357d..0b6f765bcad994951e158f6812f9642e86905598 100644 (file)
@@ -231,9 +231,12 @@ int bch2_data_update_index_update(struct bch_write_op *op)
                                m->data_opts.btree_insert_flags);
                if (!ret) {
                        bch2_btree_iter_set_pos(&iter, next_pos);
-                       atomic_long_inc(&c->extent_migrate_done);
+
                        if (ec_ob)
                                bch2_ob_add_backpointer(c, ec_ob, &insert->k);
+
+                       this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
+                       trace_move_extent_finish(&new->k);
                }
 err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -248,16 +251,16 @@ next:
                }
                continue;
 nomatch:
-               trace_data_update_fail(&old.k->p);
-
                if (m->ctxt) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
                        atomic64_inc(&m->ctxt->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
                                     &m->ctxt->stats->sectors_raced);
                }
-               atomic_long_inc(&c->extent_migrate_raced);
-               trace_move_race(&new->k);
+
+               this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);
+               trace_move_extent_fail(&new->k);
+
                bch2_btree_iter_advance(&iter);
                goto next;
        }
index 44fb14a5b5aee7f263c72616e07311bea908ba4d..ed78cb8d90a202f2eed9c487c95777aadeb4d6c3 100644 (file)
@@ -1496,7 +1496,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 {
        struct bio *bio = &op->write.op.wbio.bio;
 
-       trace_promote(&rbio->bio);
+       trace_and_count(op->write.op.c, read_promote, &rbio->bio);
 
        /* we now own pages: */
        BUG_ON(!rbio->bounce);
@@ -1761,7 +1761,7 @@ static void bch2_rbio_retry(struct work_struct *work)
        };
        struct bch_io_failures failed = { .nr = 0 };
 
-       trace_read_retry(&rbio->bio);
+       trace_and_count(c, read_retry, &rbio->bio);
 
        if (rbio->retry == READ_RETRY_AVOID)
                bch2_mark_io_failure(&failed, &rbio->pick);
@@ -2017,7 +2017,7 @@ static void bch2_read_endio(struct bio *bio)
 
        if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
            ptr_stale(ca, &rbio->pick.ptr)) {
-               atomic_long_inc(&c->read_realloc_races);
+               trace_and_count(c, read_reuse_race, &rbio->bio);
 
                if (rbio->flags & BCH_READ_RETRY_IF_STALE)
                        bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
@@ -2305,7 +2305,7 @@ get_bio:
        rbio->bio.bi_end_io     = bch2_read_endio;
 
        if (rbio->bounce)
-               trace_read_bounce(&rbio->bio);
+               trace_and_count(c, read_bounce, &rbio->bio);
 
        this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
        bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
@@ -2320,7 +2320,7 @@ get_bio:
 
        if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
                bio_inc_remaining(&orig->bio);
-               trace_read_split(&orig->bio);
+               trace_and_count(c, read_split, &orig->bio);
        }
 
        if (!rbio->pick.idx) {
index 26f60db751cae081b73cb6a8d92dd4ca99fbe497..9961cc674ad7383c57cf2ecad1825b712858c386 100644 (file)
@@ -390,12 +390,12 @@ retry:
        ret = journal_entry_open(j);
 
        if (ret == JOURNAL_ERR_max_in_flight)
-               trace_journal_entry_full(c);
+               trace_and_count(c, journal_entry_full, c);
 unlock:
        if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
            !j->res_get_blocked_start) {
                j->res_get_blocked_start = local_clock() ?: 1;
-               trace_journal_full(c);
+               trace_and_count(c, journal_full, c);
        }
 
        can_discard = j->can_discard;
index acb2005c3b72d4d8c1320c61ba4bf751163ce1b2..090a718b917f1c1b92d1e7d9647bbd738d9fac18 100644 (file)
@@ -1552,7 +1552,7 @@ static void do_journal_write(struct closure *cl)
 
                bch2_bio_map(bio, w->data, sectors << 9);
 
-               trace_journal_write(bio);
+               trace_and_count(c, journal_write, bio);
                closure_bio_submit(bio, cl);
 
                ca->journal.bucket_seq[ca->journal.cur_idx] =
index 00d9e3a8e526a7451619bcf27907ccfcc343ca37..a4f9d01d33ccb30d41dcec81d299d5fca2c2efad 100644 (file)
@@ -642,7 +642,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 
                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
 
-               trace_journal_reclaim_start(c, direct, kicked,
+               trace_and_count(c, journal_reclaim_start, c,
+                               direct, kicked,
                                min_nr, min_key_cache,
                                j->prereserved.reserved,
                                j->prereserved.remaining,
@@ -658,7 +659,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
                        j->nr_direct_reclaim += nr_flushed;
                else
                        j->nr_background_reclaim += nr_flushed;
-               trace_journal_reclaim_finish(c, nr_flushed);
+               trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
 
                if (nr_flushed)
                        wake_up(&j->reclaim_wait);
index ea9ce6d436a26dcc9c280490675233b5b1a8b320..0486c7e14c56ea96cf750f9eee95709d269c08b7 100644 (file)
@@ -245,8 +245,8 @@ static int bch2_move_extent(struct btree_trans *trans,
        atomic64_inc(&ctxt->stats->keys_moved);
        atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
        this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
-
-       trace_move_extent(k.k);
+       this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
+       trace_move_extent_read(k.k);
 
        atomic_add(io->read_sectors, &ctxt->read_sectors);
        list_add_tail(&io->list, &ctxt->reads);
@@ -268,7 +268,7 @@ err_free:
        kfree(io);
 err:
        percpu_ref_put(&c->writes);
-       trace_move_alloc_mem_fail(k.k);
+       trace_and_count(c, move_extent_alloc_mem_fail, k.k);
        return ret;
 }
 
index 438ea22ad5bdcb3918f03e42e6727b7d7d1325b4..dca8d4a3a89c2dcc601c995c5c35a7d01842bd6f 100644 (file)
@@ -339,7 +339,7 @@ static int bch2_copygc(struct bch_fs *c)
                         atomic64_read(&move_stats.keys_raced),
                         atomic64_read(&move_stats.sectors_raced));
 
-       trace_copygc(c,
+       trace_and_count(c, copygc, c,
                     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
                     buckets_to_move, buckets_not_moved);
        return 0;
@@ -397,7 +397,7 @@ static int bch2_copygc_thread(void *arg)
                wait = bch2_copygc_wait_amount(c);
 
                if (wait > clock->max_slop) {
-                       trace_copygc_wait(c, wait, last + wait);
+                       trace_and_count(c, copygc_wait, c, wait, last + wait);
                        c->copygc_wait = last + wait;
                        bch2_kthread_io_clock_wait(clock, last + wait,
                                        MAX_SCHEDULE_TIMEOUT);
index 48ad158637e5ca6ee073be6190f1de7eff18a472..4953f54e94d6036bbfbc9bf3e1aca18fa614c0c6 100644 (file)
@@ -801,7 +801,7 @@ int bch2_write_super(struct bch_fs *c)
        unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
        int ret = 0;
 
-       trace_write_super(c, _RET_IP_);
+       trace_and_count(c, write_super, c, _RET_IP_);
 
        if (c->opts.very_degraded)
                degraded_flags |= BCH_FORCE_IF_LOST;
index 4e2b6285cf3ad155fa91d7996976e9403bb7fa0b..d10ac84c10ce1f0069a15f0225932603cdc4da75 100644 (file)
@@ -190,11 +190,6 @@ read_attribute(internal_uuid);
 read_attribute(has_data);
 read_attribute(alloc_debug);
 
-read_attribute(read_realloc_races);
-read_attribute(extent_migrate_done);
-read_attribute(extent_migrate_raced);
-read_attribute(bucket_alloc_fail);
-
 #define x(t, n, ...) read_attribute(t);
 BCH_PERSISTENT_COUNTERS()
 #undef x
@@ -378,15 +373,6 @@ SHOW(bch2_fs)
        sysfs_hprint(btree_cache_size,          bch2_btree_cache_size(c));
        sysfs_hprint(btree_avg_write_size,      bch2_btree_avg_write_size(c));
 
-       sysfs_print(read_realloc_races,
-                   atomic_long_read(&c->read_realloc_races));
-       sysfs_print(extent_migrate_done,
-                   atomic_long_read(&c->extent_migrate_done));
-       sysfs_print(extent_migrate_raced,
-                   atomic_long_read(&c->extent_migrate_raced));
-       sysfs_print(bucket_alloc_fail,
-                   atomic_long_read(&c->bucket_alloc_fail));
-
        sysfs_printf(btree_gc_periodic, "%u",   (int) c->btree_gc_periodic);
 
        if (attr == &sysfs_gc_gens_pos)
@@ -629,11 +615,6 @@ struct attribute *bch2_fs_internal_files[] = {
        &sysfs_trigger_invalidates,
        &sysfs_prune_cache,
 
-       &sysfs_read_realloc_races,
-       &sysfs_extent_migrate_done,
-       &sysfs_extent_migrate_raced,
-       &sysfs_bucket_alloc_fail,
-
        &sysfs_gc_gens_pos,
 
        &sysfs_copy_gc_enabled,
index 2c1661ab807b465c0151b3f8b55f6e88fc67d003..1ef99af5cd0347b6c714ea5260e41cc051295787 100644 (file)
@@ -52,6 +52,31 @@ DECLARE_EVENT_CLASS(bkey,
                  __entry->offset, __entry->size)
 );
 
+DECLARE_EVENT_CLASS(btree_node,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b),
+
+       TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __field(u8,             level                   )
+               __field(u8,             btree_id                )
+               TRACE_BPOS_entries(pos)
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = c->dev;
+               __entry->level          = b->c.level;
+               __entry->btree_id       = b->c.btree_id;
+               TRACE_BPOS_assign(pos, b->key.k.p);
+       ),
+
+       TP_printk("%d,%d %u %s %llu:%llu:%u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->level,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+);
+
 DECLARE_EVENT_CLASS(bch_fs,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c),
@@ -112,7 +137,7 @@ TRACE_EVENT(write_super,
 
 /* io.c: */
 
-DEFINE_EVENT(bio, read_split,
+DEFINE_EVENT(bio, read_promote,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
 );
@@ -122,12 +147,17 @@ DEFINE_EVENT(bio, read_bounce,
        TP_ARGS(bio)
 );
 
+DEFINE_EVENT(bio, read_split,
+       TP_PROTO(struct bio *bio),
+       TP_ARGS(bio)
+);
+
 DEFINE_EVENT(bio, read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, promote,
+DEFINE_EVENT(bio, read_reuse_race,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
 );
@@ -220,8 +250,6 @@ TRACE_EVENT(journal_reclaim_finish,
                  __entry->nr_flushed)
 );
 
-/* allocator: */
-
 /* bset.c: */
 
 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
@@ -229,39 +257,61 @@ DEFINE_EVENT(bpos, bkey_pack_pos_fail,
        TP_ARGS(p)
 );
 
-/* Btree */
+/* Btree cache: */
 
-DECLARE_EVENT_CLASS(btree_node,
-       TP_PROTO(struct bch_fs *c, struct btree *b),
-       TP_ARGS(c, b),
+TRACE_EVENT(btree_cache_scan,
+       TP_PROTO(long nr_to_scan, long can_free, long ret),
+       TP_ARGS(nr_to_scan, can_free, ret),
 
        TP_STRUCT__entry(
-               __field(dev_t,          dev                     )
-               __field(u8,             level                   )
-               __field(u8,             btree_id                )
-               TRACE_BPOS_entries(pos)
+               __field(long,   nr_to_scan              )
+               __field(long,   can_free                )
+               __field(long,   ret                     )
        ),
 
        TP_fast_assign(
-               __entry->dev            = c->dev;
-               __entry->level          = b->c.level;
-               __entry->btree_id       = b->c.btree_id;
-               TRACE_BPOS_assign(pos, b->key.k.p);
+               __entry->nr_to_scan     = nr_to_scan;
+               __entry->can_free       = can_free;
+               __entry->ret            = ret;
        ),
 
-       TP_printk("%d,%d %u %s %llu:%llu:%u",
-                 MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->level,
-                 bch2_btree_ids[__entry->btree_id],
-                 __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+       TP_printk("scanned for %li nodes, can free %li, ret %li",
+                 __entry->nr_to_scan, __entry->can_free, __entry->ret)
+);
+
+DEFINE_EVENT(btree_node, btree_cache_reap,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
 );
 
-DEFINE_EVENT(btree_node, btree_read,
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+/* Btree */
+
+DEFINE_EVENT(btree_node, btree_node_read,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-TRACE_EVENT(btree_write,
+TRACE_EVENT(btree_node_write,
        TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
        TP_ARGS(b, bytes, sectors),
 
@@ -291,31 +341,6 @@ DEFINE_EVENT(btree_node, btree_node_free,
        TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_node_reap,
-       TP_PROTO(struct bch_fs *c, struct btree *b),
-       TP_ARGS(c, b)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock_fail,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
-
 TRACE_EVENT(btree_reserve_get_fail,
        TP_PROTO(const char *trans_fn,
                 unsigned long caller_ip,
@@ -340,52 +365,32 @@ TRACE_EVENT(btree_reserve_get_fail,
                  __entry->required)
 );
 
-DEFINE_EVENT(btree_node, btree_split,
+DEFINE_EVENT(btree_node, btree_node_compact,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_compact,
+DEFINE_EVENT(btree_node, btree_node_merge,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_merge,
+DEFINE_EVENT(btree_node, btree_node_split,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_rewrite,
+DEFINE_EVENT(btree_node, btree_node_rewrite,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_set_root,
+DEFINE_EVENT(btree_node, btree_node_set_root,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-TRACE_EVENT(btree_cache_scan,
-       TP_PROTO(long nr_to_scan, long can_free, long ret),
-       TP_ARGS(nr_to_scan, can_free, ret),
-
-       TP_STRUCT__entry(
-               __field(long,   nr_to_scan              )
-               __field(long,   can_free                )
-               __field(long,   ret                     )
-       ),
-
-       TP_fast_assign(
-               __entry->nr_to_scan     = nr_to_scan;
-               __entry->can_free       = can_free;
-               __entry->ret            = ret;
-       ),
-
-       TP_printk("scanned for %li nodes, can free %li, ret %li",
-                 __entry->nr_to_scan, __entry->can_free, __entry->ret)
-);
-
-TRACE_EVENT(btree_node_relock_fail,
+TRACE_EVENT(btree_path_relock_fail,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
@@ -429,7 +434,7 @@ TRACE_EVENT(btree_node_relock_fail,
                  __entry->node_lock_seq)
 );
 
-TRACE_EVENT(btree_node_upgrade_fail,
+TRACE_EVENT(btree_path_upgrade_fail,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
@@ -617,7 +622,7 @@ TRACE_EVENT(discard_buckets,
                  __entry->err)
 );
 
-TRACE_EVENT(invalidate_bucket,
+TRACE_EVENT(bucket_invalidate,
        TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
        TP_ARGS(c, dev, bucket, sectors),
 
@@ -643,17 +648,27 @@ TRACE_EVENT(invalidate_bucket,
 
 /* Moving IO */
 
-DEFINE_EVENT(bkey, move_extent,
+DEFINE_EVENT(bkey, move_extent_read,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_write,
        TP_PROTO(const struct bkey *k),
        TP_ARGS(k)
 );
 
-DEFINE_EVENT(bkey, move_alloc_mem_fail,
+DEFINE_EVENT(bkey, move_extent_finish,
        TP_PROTO(const struct bkey *k),
        TP_ARGS(k)
 );
 
-DEFINE_EVENT(bkey, move_race,
+DEFINE_EVENT(bkey, move_extent_fail,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
        TP_PROTO(const struct bkey *k),
        TP_ARGS(k)
 );
@@ -732,11 +747,6 @@ TRACE_EVENT(copygc_wait,
                  __entry->wait_amount, __entry->until)
 );
 
-DEFINE_EVENT(bpos, data_update_fail,
-       TP_PROTO(const struct bpos *p),
-       TP_ARGS(p)
-);
-
 /* btree transactions: */
 
 DECLARE_EVENT_CLASS(transaction_event,
@@ -763,7 +773,7 @@ DEFINE_EVENT(transaction_event,     transaction_commit,
        TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(transaction_event,        transaction_restart_injected,
+DEFINE_EVENT(transaction_event,        trans_restart_injected,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
@@ -926,7 +936,7 @@ DEFINE_EVENT(transaction_restart_iter,      trans_restart_relock_after_fill,
        TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(transaction_event,        transaction_restart_key_cache_upgrade,
+DEFINE_EVENT(transaction_event,        trans_restart_key_cache_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)