bcachefs: kill btree_path->(alloc_seq|downgrade_seq)
author Kent Overstreet <kent.overstreet@linux.dev>
Sun, 10 Dec 2023 21:12:24 +0000 (16:12 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
Mon, 1 Jan 2024 16:47:41 +0000 (11:47 -0500)
These were for extra info in tracepoints for debugging a specialized
issue - we do not want to bloat btree_path for this, at least in release
builds.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
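
The "at least in release builds" remark hints at the alternative of keeping such counters only in debug kernels (e.g. behind CONFIG_BCACHEFS_DEBUG) rather than deleting them outright. As a rough, hypothetical illustration of the struct bloat being avoided (simplified stand-in structs, not the real btree_path layout), the following standalone sketch prints the size difference that dropping the two u32 counters buys, alignment padding included:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the tail of struct btree_path around the removed counters;
 * the member names mirror the patch, the surrounding layout does not. */
struct path_with_seq {
	uint8_t		sorted_idx;
	uint8_t		ref;
	uint8_t		intent_ref;
	uint32_t	alloc_seq;
	uint32_t	downgrade_seq;
};

struct path_without_seq {
	uint8_t		sorted_idx;
	uint8_t		ref;
	uint8_t		intent_ref;
};

int main(void)
{
	/* The u32 members force 4-byte alignment, so removing them also
	 * removes the padding they pulled in. */
	printf("with seq fields:    %zu bytes\n", sizeof(struct path_with_seq));
	printf("without seq fields: %zu bytes\n", sizeof(struct path_without_seq));
	return 0;
}

On a typical ABI this prints 12 and 3 bytes for the stand-ins; since btree_paths are allocated per transaction, per-path savings multiply.
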
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_locking.c
fs/bcachefs/btree_types.h
fs/bcachefs/trace.h

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index e2f010564ceb3968c303baed31a59a310c4bf5f5..48691b62d67137af847f0d368a2ab1d2ebca59c8 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1539,7 +1539,6 @@ static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
        path->ref               = 0;
        path->intent_ref        = 0;
        path->nodes_locked      = 0;
-       path->alloc_seq++;
 
        btree_path_list_add(trans, pos, path);
        trans->paths_sorted = false;
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 1eca320e7574ef251c2f62a5463fb93da7530f7c..24a91cc38538dc895f0a5d5fac1bb7008fe535f0 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -708,7 +708,6 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 
        bch2_btree_path_verify_locks(path);
 
-       path->downgrade_seq++;
        trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
 }
 
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 78d9f585db4545312739490725ef71ee19af3b46..78de9569ff140cb875431e62baa80df71b832111 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -227,8 +227,6 @@ struct btree_path {
        u8                      sorted_idx;
        u8                      ref;
        u8                      intent_ref;
-       u32                     alloc_seq;
-       u32                     downgrade_seq;
 
        /* btree_iter_copy starts here: */
        struct bpos             pos;
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index cfa7ee780fd4cab217b8749443e544b716318f77..427edb3e7cd6ec40fb6d57b29fe99e40b09996b5 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -1145,8 +1145,6 @@ TRACE_EVENT(trans_restart_upgrade,
                __field(u8,                     level           )
                __field(u32,                    path_seq        )
                __field(u32,                    node_seq        )
-               __field(u32,                    path_alloc_seq  )
-               __field(u32,                    downgrade_seq)
                TRACE_BPOS_entries(pos)
        ),
 
@@ -1159,12 +1157,10 @@ TRACE_EVENT(trans_restart_upgrade,
                __entry->level                  = f->l;
                __entry->path_seq               = path->l[f->l].lock_seq;
                __entry->node_seq               = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
-               __entry->path_alloc_seq         = path->alloc_seq;
-               __entry->downgrade_seq          = path->downgrade_seq;
                TRACE_BPOS_assign(pos, path->pos)
        ),
 
-       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_id_str(__entry->btree_id),
@@ -1175,9 +1171,7 @@ TRACE_EVENT(trans_restart_upgrade,
                  __entry->new_locks_want,
                  __entry->level,
                  __entry->path_seq,
-                 __entry->node_seq,
-                 __entry->path_alloc_seq,
-                 __entry->downgrade_seq)
+                 __entry->node_seq)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,