*/
 struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
                                  const struct bkey_i *k, unsigned level,
-                                 enum six_lock_type lock_type)
+                                 enum six_lock_type lock_type,
+                                 unsigned long trace_ip)
 {
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
                        btree_node_unlock(iter, level + 1);
 
                if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
-                                    lock_node_check_fn, (void *) k)) {
+                                    lock_node_check_fn, (void *) k, trace_ip)) {
                        if (b->hash_val != btree_ptr_hash_val(k))
                                goto retry;
                        return ERR_PTR(-EINTR);
        bch2_bkey_unpack(parent, &tmp.k, k);
 
        ret = bch2_btree_node_get(c, iter, &tmp.k, level,
-                                 SIX_LOCK_intent);
+                                 SIX_LOCK_intent, _THIS_IP_);
 
        if (PTR_ERR_OR_ZERO(ret) == -EINTR && !trans->nounlock) {
                struct btree_iter *linked;
                        btree_node_unlock(iter, level);
 
                ret = bch2_btree_node_get(c, iter, &tmp.k, level,
-                                         SIX_LOCK_intent);
+                                         SIX_LOCK_intent, _THIS_IP_);
 
                /*
                 * before btree_iter_relock() calls btree_iter_verify_locks():
 
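These hunks thread a trace_ip argument from the iterator entry points down into the node-locking path, so that when a would-deadlock restart fires the tracepoint can name the call site that triggered it: _THIS_IP_ captures the current instruction pointer, _RET_IP_ the caller's return address. A minimal userspace sketch of the pattern follows; the two macros are the kernel's own definitions, everything else (lock_node(), traverse_one(), traverse()) is hypothetical:

#include <stdio.h>

/* The kernel's definitions (include/linux/kernel.h at the time of this patch): */
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long) &&__here; })
#define _RET_IP_   ((unsigned long) __builtin_return_address(0))

/* Innermost layer: on failure, report the call site we were handed. */
static int lock_node(unsigned long trace_ip)
{
	fprintf(stderr, "lock failed, triggered from %#lx\n", trace_ip);
	return -1;
}

/* Middle layers just forward trace_ip, as btree_iter_down() and
 * bch2_btree_node_get() now do. */
static int traverse_one(unsigned long trace_ip)
{
	return lock_node(trace_ip);
}

/* Public entry point: attributes failures to *its* caller, the way
 * bch2_btree_iter_traverse() does with _RET_IP_. */
int traverse(void)
{
	return traverse_one(_RET_IP_);
}

int main(void)
{
	traverse_one(_THIS_IP_);	/* attribute to this exact line */
	traverse();			/* attribute to main()'s call site */
	return 0;
}
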
 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
                            unsigned level, struct btree_iter *iter,
                            enum six_lock_type type,
-                           six_lock_should_sleep_fn should_sleep_fn,
-                           void *p)
+                           six_lock_should_sleep_fn should_sleep_fn, void *p,
+                           unsigned long ip)
 {
        struct btree_trans *trans = iter->trans;
-       struct btree_iter *linked;
+       struct btree_iter *linked, *deadlock_iter = NULL;
        u64 start_time = local_clock();
-       bool ret = true;
+       unsigned reason = 9; /* sentinel: not one of the 1-6 failure codes below */
 
        /* Check if it's safe to block: */
        trans_for_each_iter(trans, linked) {
                                linked->locks_want = max_t(unsigned,
                                                linked->locks_want,
                                                __fls(linked->nodes_locked) + 1);
-                               if (!btree_iter_get_locks(linked, true, false))
-                                       ret = false;
+                               if (!btree_iter_get_locks(linked, true, false)) {
+                                       deadlock_iter = linked;
+                                       reason = 1;
+                               }
                        } else {
-                               ret = false;
+                               deadlock_iter = linked;
+                               reason = 2;
                        }
                }
 
                                        max(level + 1, max_t(unsigned,
                                            linked->locks_want,
                                            iter->locks_want));
-                               if (!btree_iter_get_locks(linked, true, false))
-                                       ret = false;
+                               if (!btree_iter_get_locks(linked, true, false)) {
+                                       deadlock_iter = linked;
+                                       reason = 3;
+                               }
                        } else {
-                               ret = false;
+                               deadlock_iter = linked;
+                               reason = 4;
                        }
                }
 
                /* Must lock btree nodes in key order: */
                if ((cmp_int(iter->btree_id, linked->btree_id) ?:
-                    -cmp_int(btree_iter_type(iter), btree_iter_type(linked))) < 0)
-                       ret = false;
+                    -cmp_int(btree_iter_type(iter), btree_iter_type(linked))) < 0) {
+                       deadlock_iter = linked;
+                       reason = 5;
+               }
 
                if (iter->btree_id == linked->btree_id &&
                    btree_node_locked(linked, level) &&
                    bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
-                                                btree_iter_type(linked))) <= 0)
-                       ret = false;
+                                                btree_iter_type(linked))) <= 0) {
+                       deadlock_iter = linked;
+                       reason = 6;
+               }
 
                /*
                 * Recheck if this is a node we already have locked - since one
                }
        }
 
-       if (unlikely(!ret)) {
-               trace_trans_restart_would_deadlock(iter->trans->ip);
+       if (unlikely(deadlock_iter)) {
+               trace_trans_restart_would_deadlock(iter->trans->ip, ip,
+                               reason,
+                               deadlock_iter->btree_id,
+                               btree_iter_type(deadlock_iter),
+                               iter->btree_id,
+                               btree_iter_type(iter));
                return false;
        }
 
 }
 
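The rewrite above replaces the single `bool ret` with a deadlock_iter pointer and a numeric reason, so the one trace call at the end can report which linked iterator conflicted and which check tripped, rather than a bare failure. A self-contained sketch of that reporting shape, with all names hypothetical and only the btree_id-ordering check (reason 5) modeled:

#include <stdio.h>
#include <stdbool.h>

struct iter { int btree_id; };

/*
 * Instead of folding every failure path into one bool, remember the
 * conflicting iterator and a small code for which check tripped; a
 * later check may overwrite an earlier one, as in the patch, and the
 * sentinel 9 means no conflict was recorded.
 */
static bool check_lock_order(struct iter *held, int n, struct iter *want)
{
	struct iter *deadlock_iter = NULL;
	unsigned reason = 9;

	for (int i = 0; i < n; i++) {
		/* Must take locks in btree_id order: */
		if (want->btree_id < held[i].btree_id) {
			deadlock_iter = &held[i];
			reason = 5;	/* cf. reason 5 in the hunk above */
		}
	}

	if (deadlock_iter) {
		fprintf(stderr,
			"would deadlock: reason %u, have btree %d, want btree %d\n",
			reason, deadlock_iter->btree_id, want->btree_id);
		return false;
	}
	return true;
}

int main(void)
{
	struct iter held[] = { { .btree_id = 2 } };
	struct iter want = { .btree_id = 1 };

	check_lock_order(held, 1, &want);
	return 0;
}
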
 static inline int btree_iter_lock_root(struct btree_iter *iter,
-                                      unsigned depth_want)
+                                      unsigned depth_want,
+                                      unsigned long trace_ip)
 {
        struct bch_fs *c = iter->trans->c;
        struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
                lock_type = __btree_lock_want(iter, iter->level);
                if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
                                              iter, lock_type,
-                                             lock_root_check_fn, rootp)))
+                                             lock_root_check_fn, rootp,
+                                             trace_ip)))
                        return -EINTR;
 
                if (likely(b == READ_ONCE(*rootp) &&
                btree_node_unlock(iter, plevel);
 }
 
-static __always_inline int btree_iter_down(struct btree_iter *iter)
+static __always_inline int btree_iter_down(struct btree_iter *iter,
+                                          unsigned long trace_ip)
 {
        struct bch_fs *c = iter->trans->c;
        struct btree_iter_level *l = &iter->l[iter->level];
        bch2_bkey_unpack(l->b, &tmp.k,
                         bch2_btree_node_iter_peek(&l->iter, l->b));
 
-       b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type);
+       b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type, trace_ip);
        if (unlikely(IS_ERR(b)))
                return PTR_ERR(b);
 
        btree_node_unlock(iter, iter->level++);
 }
 
-static int btree_iter_traverse_one(struct btree_iter *);
+static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
 
 static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
 {
        bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
 #undef btree_iter_cmp_by_idx
        bch2_trans_unlock(trans);
+       cond_resched();
 
        if (unlikely(ret == -ENOMEM)) {
                struct closure cl;
                if (!(trans->iters_linked & (1ULL << idx)))
                        continue;
 
-               ret = btree_iter_traverse_one(&trans->iters[idx]);
+               ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
                if (ret)
                        goto retry_all;
        }
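
__btree_iter_traverse_all() also gains an explicit cond_resched() immediately after dropping all locks, so a transaction bouncing through this retry path repeatedly yields the CPU at a point where it holds nothing. Sketched in userspace below, with hypothetical names and sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdbool.h>

static bool traverse_one_iter(int idx)
{
	return idx >= 0;	/* stub: pretend traversal succeeds */
}

static int traverse_all(int nr_iters)
{
retry:
	/* all locks have just been dropped here ... */
	sched_yield();		/* userspace stand-in for cond_resched() */

	for (int idx = 0; idx < nr_iters; idx++)
		if (!traverse_one_iter(idx))
			goto retry;	/* start over from scratch */
	return 0;
}

int main(void)
{
	return traverse_all(4);
}
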
  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
  * stashed in the iterator and returned from bch2_trans_exit().
  */
-static int btree_iter_traverse_one(struct btree_iter *iter)
+static int btree_iter_traverse_one(struct btree_iter *iter,
+                                  unsigned long trace_ip)
 {
        unsigned depth_want = iter->level;
 
         */
        while (iter->level > depth_want) {
                int ret = btree_iter_node(iter, iter->level)
-                       ? btree_iter_down(iter)
-                       : btree_iter_lock_root(iter, depth_want);
+                       ? btree_iter_down(iter, trace_ip)
+                       : btree_iter_lock_root(iter, depth_want, trace_ip);
                if (unlikely(ret)) {
                        if (ret == 1)
                                return 0;
        int ret;
 
        ret =   bch2_trans_cond_resched(trans) ?:
-               btree_iter_traverse_one(iter);
+               btree_iter_traverse_one(iter, _RET_IP_);
        if (unlikely(ret))
                ret = __btree_iter_traverse_all(trans, ret);
 
 
 
 bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
                            struct btree_iter *, enum six_lock_type,
-                           six_lock_should_sleep_fn, void *);
+                           six_lock_should_sleep_fn, void *,
+                           unsigned long);
 
 static inline bool btree_node_lock(struct btree *b,
                        struct bpos pos, unsigned level,
                        struct btree_iter *iter,
                        enum six_lock_type type,
-                       six_lock_should_sleep_fn should_sleep_fn, void *p)
+                       six_lock_should_sleep_fn should_sleep_fn, void *p,
+                       unsigned long ip)
 {
        struct btree_trans *trans = iter->trans;
        bool ret;
        ret   = likely(six_trylock_type(&b->c.lock, type)) ||
                btree_node_lock_increment(trans, b, level, type) ||
                __bch2_btree_node_lock(b, pos, level, iter, type,
-                                      should_sleep_fn, p);
+                                      should_sleep_fn, p, ip);
 
 #ifdef CONFIG_BCACHEFS_DEBUG
        trans->locking = NULL;
 
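Note that btree_node_lock() keeps its short-circuit fast path: six_trylock_type() and btree_node_lock_increment() are tried first, and the new ip argument is only consumed by __bch2_btree_node_lock() when we fall through to the slow path, so threading the tracing argument costs nothing on an uncontended lock. A pthread-based sketch of that shape (hypothetical names, with pthread_mutex_* standing in for the six locks):

#include <stdbool.h>
#include <pthread.h>

struct node { pthread_mutex_t lock; };

/* Slow path: may block; `ip` identifies the call site for tracing. */
static bool node_lock_slowpath(struct node *b, unsigned long ip)
{
	(void) ip;		/* would be fed to a tracepoint here */
	pthread_mutex_lock(&b->lock);
	return true;
}

/* Fast path first; `ip` is threaded through but only consumed if we
 * actually have to block. */
static inline bool node_lock(struct node *b, unsigned long ip)
{
	return pthread_mutex_trylock(&b->lock) == 0 ||
	       node_lock_slowpath(b, ip);
}

int main(void)
{
	struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };

	node_lock(&n, (unsigned long) __builtin_return_address(0));
	pthread_mutex_unlock(&n.lock);
	return 0;
}
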
        TP_ARGS(ip)
 );
 
-DEFINE_EVENT(transaction_restart,      trans_restart_would_deadlock,
-       TP_PROTO(unsigned long ip),
-       TP_ARGS(ip)
+TRACE_EVENT(trans_restart_would_deadlock,
+       TP_PROTO(unsigned long  trans_ip,
+                unsigned long  caller_ip,
+                unsigned       reason,
+                enum btree_id  have_btree_id,
+                unsigned       have_iter_type,
+                enum btree_id  want_btree_id,
+                unsigned       want_iter_type),
+       TP_ARGS(trans_ip, caller_ip, reason,
+               have_btree_id, have_iter_type,
+               want_btree_id, want_iter_type),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,          trans_ip        )
+               __field(unsigned long,          caller_ip       )
+               __field(u8,                     reason          )
+               __field(u8,                     have_btree_id   )
+               __field(u8,                     have_iter_type  )
+               __field(u8,                     want_btree_id   )
+               __field(u8,                     want_iter_type  )
+       ),
+
+       TP_fast_assign(
+               __entry->trans_ip               = trans_ip;
+               __entry->caller_ip              = caller_ip;
+               __entry->reason                 = reason;
+               __entry->have_btree_id          = have_btree_id;
+               __entry->have_iter_type         = have_iter_type;
+               __entry->want_btree_id          = want_btree_id;
+               __entry->want_iter_type         = want_iter_type;
+       ),
+
+       TP_printk("%ps %pS because %u have %u:%u want %u:%u",
+                 (void *) __entry->trans_ip,
+                 (void *) __entry->caller_ip,
+                 __entry->reason,
+                 __entry->have_btree_id,
+                 __entry->have_iter_type,
+                 __entry->want_btree_id,
+                 __entry->want_iter_type)
 );
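
When the event fires, the two instruction pointers are symbolized and the remaining fields print raw: reason is the code assigned in __bch2_btree_node_lock(), and each %u:%u pair is a btree_id:iterator-type pair. The decoder below is illustrative only, reconstructed from the assignments visible in these hunks; the meanings of codes 1-4 are inferred from truncated context, 7-8 are not assigned here, and 9 is the initializer and should never be traced:

/* Illustrative only: maps the reason codes assigned above to the
 * checks that set them. Not part of the patch. */
const char *would_deadlock_reason(unsigned reason)
{
	switch (reason) {
	case 1:	/* btree_iter_get_locks() failed after raising linked->locks_want */
		return "linked iter relock failed";
	case 2:	/* first upgrade loop, non-upgradable branch */
		return "linked iter not upgradable";
	case 3:	/* btree_iter_get_locks() failed after raising locks_want for this level */
		return "linked iter relock failed (level)";
	case 4:	/* second upgrade loop, non-upgradable branch */
		return "linked iter not upgradable (level)";
	case 5:	return "btree id/iterator type lock ordering";
	case 6:	return "node position lock ordering within a btree";
	default: return "unknown";
	}
}
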
 
 TRACE_EVENT(trans_restart_iters_realloced,