return true;
}
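+/*
+ * trace_ip is the instruction pointer of the call site requesting the
+ * traversal; it is passed down so that the node_upgrade_fail and
+ * node_relock_fail tracepoints can report where a failed lock attempt
+ * originated.
+ */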
-static inline bool btree_iter_get_locks(struct btree_iter *iter,
- bool upgrade, bool trace)
+static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
+ unsigned long trace_ip)
{
unsigned l = iter->level;
int fail_idx = -1;
if (!(upgrade
? bch2_btree_node_upgrade(iter, l)
: bch2_btree_node_relock(iter, l))) {
- if (trace)
- (upgrade
- ? trace_node_upgrade_fail
- : trace_node_relock_fail)(l, iter->l[l].lock_seq,
- is_btree_node(iter, l)
- ? 0
- : (unsigned long) iter->l[l].b,
- is_btree_node(iter, l)
- ? iter->l[l].b->c.lock.state.seq
- : 0);
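+			/*
+			 * Report the transaction start IP and the traversal
+			 * call site, plus the iterator position and the lock
+			 * sequence numbers of the node we failed to lock:
+			 */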
+ (upgrade
+ ? trace_node_upgrade_fail
+ : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+ iter->btree_id, &iter->real_pos,
+ l, iter->l[l].lock_seq,
+ is_btree_node(iter, l)
+ ? 0
+ : (unsigned long) iter->l[l].b,
+ is_btree_node(iter, l)
+ ? iter->l[l].b->c.lock.state.seq
+ : 0);
fail_idx = l;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
#endif
__flatten
-bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
- return btree_iter_get_locks(iter, false, trace);
+ return btree_iter_get_locks(iter, false, trace_ip);
}
bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
iter->locks_want = new_locks_want;
- if (btree_iter_get_locks(iter, true, true))
+ if (btree_iter_get_locks(iter, true, _THIS_IP_))
return true;
/*
linked->btree_id == iter->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_iter_get_locks(linked, true, false);
+ btree_iter_get_locks(linked, true, _THIS_IP_);
}
return false;
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
- if (!bch2_btree_iter_relock(iter, true)) {
- trace_trans_restart_relock(trans->ip);
+ if (!bch2_btree_iter_relock(iter, _RET_IP_)) {
+ trace_trans_restart_relock(trans->ip, _RET_IP_,
+ iter->btree_id, &iter->real_pos);
return false;
}
return true;
static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
-static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
+ unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
relock_fail = false;
trans_for_each_iter(trans, iter) {
- if (!bch2_btree_iter_relock(iter, true))
+ if (!bch2_btree_iter_relock(iter, _THIS_IP_))
relock_fail = true;
sorted[nr_sorted++] = iter->idx;
}
trans->in_traverse_all = false;
- trace_trans_traverse_all(trans->ip);
+ trace_trans_traverse_all(trans->ip, trace_ip);
return ret;
}
int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
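+	/*
+	 * _RET_IP_ is __builtin_return_address(0): attribute the traversal
+	 * to our caller rather than to this wrapper.
+	 */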
- return __btree_iter_traverse_all(trans, 0);
+ return __btree_iter_traverse_all(trans, 0, _RET_IP_);
}
static int btree_iter_traverse_one(struct btree_iter *iter,
				   unsigned long trace_ip)
{
unsigned depth_want = iter->level;
+ int ret = 0;
	/*
	 * if we need interior nodes locked, call btree_iter_relock() to make
	 * sure we walk back up enough that we lock them:
	 */
if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
iter->locks_want > 1)
- bch2_btree_iter_relock(iter, false);
+ bch2_btree_iter_relock(iter, _THIS_IP_);
- if (btree_iter_type(iter) == BTREE_ITER_CACHED)
- return bch2_btree_iter_traverse_cached(iter);
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+ ret = bch2_btree_iter_traverse_cached(iter);
+ goto out;
+ }
if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
- return 0;
+ goto out;
if (unlikely(iter->level >= BTREE_MAX_DEPTH))
- return 0;
+ goto out;
iter->level = btree_iter_up_until_good_node(iter, 0);
	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
while (iter->level > depth_want) {
- int ret = btree_iter_node(iter, iter->level)
+ ret = btree_iter_node(iter, iter->level)
? btree_iter_down(iter, trace_ip)
: btree_iter_lock_root(iter, depth_want, trace_ip);
if (unlikely(ret)) {
- if (ret == 1)
- return 0;
+ if (ret == 1) {
+ /*
+ * Got to the end of the btree (in
+ * BTREE_ITER_NODES mode)
+ */
+ ret = 0;
+ goto out;
+ }
iter->level = depth_want;
iter->l[iter->level].b =
BTREE_ITER_NO_NODE_DOWN;
}
- return ret;
+ goto out;
}
}
iter->uptodate = BTREE_ITER_NEED_PEEK;
-
+out:
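+	/* trace the outcome of every traversal, successful or not: */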
+ trace_iter_traverse(iter->trans->ip, trace_ip,
+ iter->btree_id, &iter->real_pos, ret);
bch2_btree_iter_verify(iter);
- return 0;
+ return ret;
}
static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
ret = bch2_trans_cond_resched(trans) ?:
btree_iter_traverse_one(iter, _RET_IP_);
if (unlikely(ret))
- ret = __btree_iter_traverse_all(trans, ret);
+ ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
return ret;
}
static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
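+	/* saved so the tracepoint below can report the old -> new transition: */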
+ struct bpos old_pos = iter->real_pos;
int cmp = bpos_cmp(new_pos, iter->real_pos);
unsigned l = iter->level;
if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
btree_node_unlock(iter, 0);
- iter->l[0].b = BTREE_ITER_NO_NODE_UP;
+ iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
return;
}
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
bch2_btree_iter_verify(iter);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
+ iter->btree_id,
+ &old_pos, &new_pos, l);
+#endif
}
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
best = iter;
}
- trace_trans_get_iter(_RET_IP_, trans->ip,
- btree_id,
- &real_pos, locks_want,
- best ? &best->real_pos : &pos_min,
- best ? best->locks_want : 0,
- best ? best->uptodate : BTREE_ITER_NEED_TRAVERSE);
-
if (!best) {
iter = btree_trans_iter_alloc(trans);
bch2_btree_iter_init(trans, iter, btree_id);
locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > iter->locks_want) {
iter->locks_want = locks_want;
- btree_iter_get_locks(iter, true, false);
+ btree_iter_get_locks(iter, true, _THIS_IP_);
}
while (iter->level != depth) {
bch2_btree_iter_set_pos(iter, pos);
btree_iter_set_search_pos(iter, real_pos);
+	trace_trans_get_iter(trans->ip, _RET_IP_,
+ btree_id,
+ &real_pos, locks_want, iter->uptodate,
+ best ? &best->real_pos : &pos_min,
+ best ? best->locks_want : U8_MAX,
+ best ? best->uptodate : U8_MAX);
+
return iter;
}
}
static noinline int
-bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s)
+bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
+ unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
int ret;
return ret;
if (!bch2_trans_relock(trans)) {
- trace_trans_restart_journal_preres_get(trans->ip);
+ trace_trans_restart_journal_preres_get(trans->ip, trace_ip);
return -EINTR;
}
static inline int
bch2_trans_commit_write_locked(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+ struct btree_insert_entry **stopped_at,
+ unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
int ret;
if (race_fault()) {
- trace_trans_restart_fault_inject(trans->ip);
+ trace_trans_restart_fault_inject(trans->ip, trace_ip);
return -EINTR;
}
* Get journal reservation, take write locks, and attempt to do btree update(s):
*/
static inline int do_bch2_trans_commit(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+ struct btree_insert_entry **stopped_at,
+ unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
? JOURNAL_RES_GET_RESERVED : 0));
if (unlikely(ret == -EAGAIN))
ret = bch2_trans_journal_preres_get_cold(trans,
- trans->journal_preres_u64s);
+ trans->journal_preres_u64s, trace_ip);
if (unlikely(ret))
return ret;
if (iter->nodes_locked != iter->nodes_intent_locked) {
if (btree_iter_keep(trans, iter)) {
if (!bch2_btree_iter_upgrade(iter, 1)) {
- trace_trans_restart_upgrade(trans->ip);
+ trace_trans_restart_upgrade(trans->ip, trace_ip,
+ iter->btree_id,
+ &iter->real_pos);
return -EINTR;
}
} else {
bch2_btree_node_lock_for_insert(c,
iter_l(i->iter)->b, i->iter);
- ret = bch2_trans_commit_write_locked(trans, stopped_at);
+ ret = bch2_trans_commit_write_locked(trans, stopped_at, trace_ip);
trans_for_each_update2(trans, i)
if (!same_leaf_as_prev(trans, i))
static noinline
int bch2_trans_commit_error(struct btree_trans *trans,
struct btree_insert_entry *i,
- int ret)
+ int ret, unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
unsigned flags = trans->flags;
if (!ret ||
ret == -EINTR ||
(flags & BTREE_INSERT_NOUNLOCK)) {
- trace_trans_restart_btree_node_split(trans->ip);
+ trace_trans_restart_btree_node_split(trans->ip, trace_ip,
+ i->iter->btree_id,
+ &i->iter->real_pos);
ret = -EINTR;
}
break;
if (bch2_trans_relock(trans))
return 0;
- trace_trans_restart_mark_replicas(trans->ip);
+ trace_trans_restart_mark_replicas(trans->ip, trace_ip);
ret = -EINTR;
break;
case BTREE_INSERT_NEED_JOURNAL_RES:
if (bch2_trans_relock(trans))
return 0;
- trace_trans_restart_journal_res_get(trans->ip);
+ trace_trans_restart_journal_res_get(trans->ip, trace_ip);
ret = -EINTR;
break;
case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
bch2_trans_unlock(trans);
- trace_trans_blocked_journal_reclaim(trans->ip);
+ trace_trans_blocked_journal_reclaim(trans->ip, trace_ip);
wait_event_freezable(c->journal.reclaim_wait,
(ret = journal_reclaim_wait_done(c)));
if (bch2_trans_relock(trans))
return 0;
- trace_trans_restart_journal_reclaim(trans->ip);
+ trace_trans_restart_journal_reclaim(trans->ip, trace_ip);
ret = -EINTR;
break;
default:
i->trigger_flags);
if (unlikely(ret)) {
if (ret == -EINTR)
- trace_trans_restart_mark(trans->ip);
+ trace_trans_restart_mark(trans->ip, _RET_IP_,
+ i->iter->btree_id,
+ &i->iter->pos);
goto out;
}
}
trans_for_each_update2(trans, i) {
ret = bch2_btree_iter_traverse(i->iter);
if (unlikely(ret)) {
- trace_trans_restart_traverse(trans->ip);
+ trace_trans_restart_traverse(trans->ip, _RET_IP_,
+ i->iter->btree_id,
+ &i->iter->pos);
goto out;
}
if (unlikely(!bch2_btree_iter_upgrade(i->iter, i->level + 1))) {
- trace_trans_restart_upgrade(trans->ip);
+ trace_trans_restart_upgrade(trans->ip, _RET_IP_,
+ i->iter->btree_id,
+ &i->iter->pos);
ret = -EINTR;
goto out;
}
retry:
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
- ret = do_bch2_trans_commit(trans, &i);
+ ret = do_bch2_trans_commit(trans, &i, _RET_IP_);
/* make sure we didn't drop or screw up locks: */
bch2_btree_trans_verify_locks(trans);
return ret;
err:
- ret = bch2_trans_commit_error(trans, i, ret);
+ ret = bch2_trans_commit_error(trans, i, ret, _RET_IP_);
if (ret)
goto out;
);
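+/*
+ * "got" describes the iterator being returned; "src" describes the existing
+ * iterator it was copied from, with pos_min/U8_MAX as sentinels when there
+ * was none.
+ */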
TRACE_EVENT(trans_get_iter,
- TP_PROTO(unsigned long caller, unsigned long ip,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
enum btree_id btree_id,
- struct bpos *pos_want,
- unsigned locks_want,
- struct bpos *pos_found,
- unsigned locks_found,
- unsigned uptodate),
- TP_ARGS(caller, ip, btree_id,
- pos_want, locks_want,
- pos_found, locks_found,
- uptodate),
+ struct bpos *got_pos,
+ unsigned got_locks,
+ unsigned got_uptodate,
+ struct bpos *src_pos,
+ unsigned src_locks,
+ unsigned src_uptodate),
+ TP_ARGS(trans_ip, caller_ip, btree_id,
+ got_pos, got_locks, got_uptodate,
+ src_pos, src_locks, src_uptodate),
TP_STRUCT__entry(
- __field(unsigned long, caller )
- __field(unsigned long, ip )
- __field(u8, btree_id )
- __field(u8, uptodate )
- __field(u8, locks_want )
- __field(u8, locks_found )
- __field(u64, pos_want_inode )
- __field(u64, pos_want_offset )
- __field(u32, pos_want_snapshot )
- __field(u64, pos_found_inode )
- __field(u64, pos_found_offset )
- __field(u32, pos_found_snapshot )
+ __field(unsigned long, trans_ip )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u64, got_pos_inode )
+ __field(u64, got_pos_offset )
+ __field(u32, got_pos_snapshot )
+ __field(u8, got_locks )
+ __field(u8, got_uptodate )
+ __field(u64, src_pos_inode )
+ __field(u64, src_pos_offset )
+ __field(u32, src_pos_snapshot )
+ __field(u8, src_locks )
+ __field(u8, src_uptodate )
),
TP_fast_assign(
- __entry->caller = caller;
- __entry->ip = ip;
+ __entry->trans_ip = trans_ip;
+ __entry->caller_ip = caller_ip;
__entry->btree_id = btree_id;
- __entry->uptodate = uptodate;
- __entry->pos_want_inode = pos_want->inode;
- __entry->pos_want_offset = pos_want->offset;
- __entry->pos_want_snapshot = pos_want->snapshot;
- __entry->pos_found_inode = pos_found->inode;
- __entry->pos_found_offset = pos_found->offset;
- __entry->pos_found_snapshot = pos_found->snapshot;
- ),
-
- TP_printk("%ps %pS btree %u uptodate %u want %llu:%llu:%u locks %u found %llu:%llu:%u locks %u",
- (void *) __entry->caller,
- (void *) __entry->ip,
+ __entry->got_pos_inode = got_pos->inode;
+ __entry->got_pos_offset = got_pos->offset;
+ __entry->got_pos_snapshot = got_pos->snapshot;
+ __entry->got_locks = got_locks;
+ __entry->got_uptodate = got_uptodate;
+ __entry->src_pos_inode = src_pos->inode;
+ __entry->src_pos_offset = src_pos->offset;
+ __entry->src_pos_snapshot = src_pos->snapshot;
+ __entry->src_locks = src_locks;
+ __entry->src_uptodate = src_uptodate;
+ ),
+
+ TP_printk("%ps %pS btree %u got %llu:%llu:%u l %u u %u "
+ "src %llu:%llu:%u l %u u %u",
+ (void *) __entry->trans_ip,
+ (void *) __entry->caller_ip,
__entry->btree_id,
- __entry->uptodate,
- __entry->pos_want_inode,
- __entry->pos_want_offset,
- __entry->pos_want_snapshot,
- __entry->locks_want,
- __entry->pos_found_inode,
- __entry->pos_found_offset,
- __entry->pos_found_snapshot,
- __entry->locks_found)
+ __entry->got_pos_inode,
+ __entry->got_pos_offset,
+ __entry->got_pos_snapshot,
+ __entry->got_locks,
+ __entry->got_uptodate,
+ __entry->src_pos_inode,
+ __entry->src_pos_offset,
+ __entry->src_pos_snapshot,
+ __entry->src_locks,
+ __entry->src_uptodate)
);
TRACE_EVENT(transaction_restart_ip,
);
DECLARE_EVENT_CLASS(transaction_restart,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip),
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip),
TP_STRUCT__entry(
- __field(unsigned long, ip )
+ __field(unsigned long, trans_ip )
+ __field(unsigned long, caller_ip )
),
TP_fast_assign(
- __entry->ip = ip;
+ __entry->trans_ip = trans_ip;
+ __entry->caller_ip = caller_ip;
),
- TP_printk("%pS", (void *) __entry->ip)
+ TP_printk("%pS %pS",
+ (void *) __entry->trans_ip,
+ (void *) __entry->caller_ip)
);
-DEFINE_EVENT(transaction_restart, trans_restart_btree_node_reused,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
+DEFINE_EVENT(transaction_restart, trans_blocked_journal_reclaim,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
);
-DEFINE_EVENT(transaction_restart, trans_blocked_journal_reclaim,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
+DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_traverse_all,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip),
+ TP_ARGS(trans_ip, caller_ip)
+);
+
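+/*
+ * Like the transaction_restart class, but also records the btree and
+ * position of the iterator involved in the restart.
+ */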
+DECLARE_EVENT_CLASS(transaction_restart_iter,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, trans_ip )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u64, pos_inode )
+ __field(u64, pos_offset )
+ __field(u32, pos_snapshot )
+ ),
+
+ TP_fast_assign(
+ __entry->trans_ip = trans_ip;
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = btree_id;
+ __entry->pos_inode = pos->inode;
+ __entry->pos_offset = pos->offset;
+ __entry->pos_snapshot = pos->snapshot;
+ ),
+
+ TP_printk("%ps %pS btree %u pos %llu:%llu:%u",
+ (void *) __entry->trans_ip,
+ (void *) __entry->caller_ip,
+ __entry->btree_id,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_mark,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_upgrade,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_iter_upgrade,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos)
+);
+
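+/*
+ * Emitted at the end of btree_iter_traverse_one() with the traversal's
+ * return code.
+ */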
+TRACE_EVENT(iter_traverse,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos,
+ int ret),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos, ret),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, trans_ip )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u64, pos_inode )
+ __field(u64, pos_offset )
+ __field(u32, pos_snapshot )
+ __field(s32, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->trans_ip = trans_ip;
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = btree_id;
+ __entry->pos_inode = pos->inode;
+ __entry->pos_offset = pos->offset;
+ __entry->pos_snapshot = pos->snapshot;
+ __entry->ret = ret;
+ ),
+
+	TP_printk("%ps %pS btree %u pos %llu:%llu:%u ret %i",
+ (void *) __entry->trans_ip,
+ (void *) __entry->caller_ip,
+ __entry->btree_id,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->ret)
+);
+
+TRACE_EVENT(iter_set_search_pos,
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *old_pos,
+ struct bpos *new_pos,
+ unsigned good_level),
+ TP_ARGS(trans_ip, caller_ip, btree_id, old_pos, new_pos, good_level),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, trans_ip )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u64, old_pos_inode )
+ __field(u64, old_pos_offset )
+ __field(u32, old_pos_snapshot )
+ __field(u64, new_pos_inode )
+ __field(u64, new_pos_offset )
+ __field(u32, new_pos_snapshot )
+ __field(u8, good_level )
+ ),
+
+ TP_fast_assign(
+ __entry->trans_ip = trans_ip;
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = btree_id;
+ __entry->old_pos_inode = old_pos->inode;
+ __entry->old_pos_offset = old_pos->offset;
+ __entry->old_pos_snapshot = old_pos->snapshot;
+ __entry->new_pos_inode = new_pos->inode;
+ __entry->new_pos_offset = new_pos->offset;
+ __entry->new_pos_snapshot = new_pos->snapshot;
+ __entry->good_level = good_level;
+ ),
+
+ TP_printk("%ps %pS btree %u old pos %llu:%llu:%u new pos %llu:%llu:%u l %u",
+ (void *) __entry->trans_ip,
+ (void *) __entry->caller_ip,
+ __entry->btree_id,
+ __entry->old_pos_inode,
+ __entry->old_pos_offset,
+ __entry->old_pos_snapshot,
+ __entry->new_pos_inode,
+ __entry->new_pos_offset,
+ __entry->new_pos_snapshot,
+ __entry->good_level)
);
TRACE_EVENT(trans_restart_would_deadlock,
__entry->bytes)
);
-DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_btree_node_split,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_mark,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_upgrade,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_relock,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_restart_traverse,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
-DEFINE_EVENT(transaction_restart, trans_traverse_all,
- TP_PROTO(unsigned long ip),
- TP_ARGS(ip)
-);
-
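+/*
+ * node_lock_fail events fire when relocking or upgrading a node lock fails;
+ * they record the iterator's btree, position and level along with the
+ * iterator and node lock sequence numbers.
+ */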
DECLARE_EVENT_CLASS(node_lock_fail,
- TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
- TP_ARGS(level, iter_seq, node, node_seq),
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos,
+ unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+ level, iter_seq, node, node_seq),
TP_STRUCT__entry(
- __field(u32, level)
- __field(u32, iter_seq)
- __field(u32, node)
- __field(u32, node_seq)
+ __field(unsigned long, trans_ip )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u64, pos_inode )
+ __field(u64, pos_offset )
+ __field(u32, pos_snapshot )
+ __field(u32, level )
+ __field(u32, iter_seq )
+ __field(u32, node )
+ __field(u32, node_seq )
),
TP_fast_assign(
- __entry->level = level;
- __entry->iter_seq = iter_seq;
- __entry->node = node;
- __entry->node_seq = node_seq;
+ __entry->trans_ip = trans_ip;
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = btree_id;
+ __entry->pos_inode = pos->inode;
+ __entry->pos_offset = pos->offset;
+ __entry->pos_snapshot = pos->snapshot;
+ __entry->level = level;
+ __entry->iter_seq = iter_seq;
+ __entry->node = node;
+ __entry->node_seq = node_seq;
),
- TP_printk("level %u iter seq %u node %u node seq %u",
+ TP_printk("%ps %pS btree %u pos %llu:%llu:%u level %u iter seq %u node %u node seq %u",
+ (void *) __entry->trans_ip,
+ (void *) __entry->caller_ip,
+ __entry->btree_id,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
__entry->level, __entry->iter_seq,
__entry->node, __entry->node_seq)
);
DEFINE_EVENT(node_lock_fail, node_upgrade_fail,
- TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
- TP_ARGS(level, iter_seq, node, node_seq)
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos,
+ unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+ level, iter_seq, node, node_seq)
);
DEFINE_EVENT(node_lock_fail, node_relock_fail,
- TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
- TP_ARGS(level, iter_seq, node, node_seq)
+ TP_PROTO(unsigned long trans_ip,
+ unsigned long caller_ip,
+ enum btree_id btree_id,
+ struct bpos *pos,
+ unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
+ TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+ level, iter_seq, node, node_seq)
);
#endif /* _TRACE_BCACHEFS_H */