}
}
- if (ret)
- __btree_node_lock_type(iter->trans->c, b, type);
- else
+ if (unlikely(!ret)) {
trans_restart();
+ trace_trans_restart_would_deadlock(iter->trans->c,
+ iter->trans->ip);
+ return false;
+ }
- return ret;
+ __btree_node_lock_type(iter->trans->c, b, type);
+ return true;
}
/* Btree iterator locking: */
if (trans->iters_live) {
trans_restart();
+ trace_trans_restart_iters_realloced(trans->c, trans->ip);
return -EINTR;
}
if (old_bytes) {
trans_restart();
+ trace_trans_restart_mem_realloced(trans->c, trans->ip);
return ERR_PTR(-EINTR);
}
}
memset(trans, 0, offsetof(struct btree_trans, iters_onstack));
trans->c = c;
+ trans->ip = _RET_IP_;
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
if (!bch2_btree_trans_relock(trans)) {
trans_restart(" (iter relock after journal preres get blocked)");
+ trace_trans_restart_journal_preres_get(c, trans->ip);
return -EINTR;
}
if (race_fault()) {
ret = -EINTR;
trans_restart(" (race)");
+ trace_trans_restart_fault_inject(c, trans->ip);
goto out;
}
*/
if (!ret || (flags & BTREE_INSERT_NOUNLOCK)) {
trans_restart(" (split)");
+ trace_trans_restart_btree_node_split(c, trans->ip);
ret = -EINTR;
}
break;
return 0;
trans_restart(" (iter relock after marking replicas)");
+ trace_trans_restart_mark_replicas(c, trans->ip);
ret = -EINTR;
break;
case BTREE_INSERT_NEED_JOURNAL_RES:
return 0;
trans_restart(" (iter relock after journal res get blocked)");
+ trace_trans_restart_journal_res_get(c, trans->ip);
ret = -EINTR;
break;
default:
if (ret2) {
trans_restart(" (traverse)");
+ trace_trans_restart_traverse(c, trans->ip);
return ret2;
}
return 0;
trans_restart(" (atomic)");
+ trace_trans_restart_atomic(c, trans->ip);
}
return ret;
__entry->buckets_moved, __entry->buckets_not_moved)
);
+/*
+ * Shared event class for all trans_restart_* tracepoints: records which
+ * filesystem the restart occurred on (c->name, first 16 bytes) and the
+ * caller's return address so the restart site can be identified.
+ */
+DECLARE_EVENT_CLASS(transaction_restart,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip),
+
+	TP_STRUCT__entry(
+		__array(char,		name,	16	)
+		__field(unsigned long,	ip		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->name, c->name, 16);
+		__entry->ip = ip;
+	),
+
+	/*
+	 * Print the captured fs name too (it was previously recorded but
+	 * never emitted).  %.16s bounds the read: the 16-byte memcpy above
+	 * is not guaranteed to be NUL-terminated.
+	 */
+	TP_printk("%.16s %pS", __entry->name, (void *) __entry->ip)
+);
+
+/*
+ * One event per distinct transaction-restart cause, all instantiating the
+ * transaction_restart class: keeping them as separate events lets each
+ * restart reason be enabled, filtered, and counted independently from
+ * userspace tracing tools.
+ */
+DEFINE_EVENT(transaction_restart,	trans_restart_btree_node_reused,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_would_deadlock,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_iters_realloced,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_mem_realloced,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_journal_res_get,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_journal_preres_get,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_mark_replicas,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_fault_inject,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_btree_node_split,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_traverse,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
+DEFINE_EVENT(transaction_restart,	trans_restart_atomic,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip)
+);
+
#endif /* _TRACE_BCACHEFS_H */
/* This part must be outside protection */