int want = __btree_lock_want(path, level);
if (!is_btree_node(path, level))
- return false;
+ goto fail;
if (race_fault())
- return false;
+ goto fail;
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) {
mark_btree_node_locked(path, level, want);
return true;
- } else {
- return false;
}
+fail:
+ trace_btree_node_relock_fail(trans->fn, _RET_IP_,
+ path->btree_id,
+ &path->pos,
+ (unsigned long) b,
+ path->l[level].lock_seq,
+ is_btree_node(path, level) ? b->c.lock.state.seq : 0);
+ return false;
}
bool bch2_btree_node_upgrade(struct btree_trans *trans,
static inline bool btree_path_get_locks(struct btree_trans *trans,
struct btree_path *path,
- bool upgrade, unsigned long trace_ip)
+ bool upgrade)
{
unsigned l = path->level;
int fail_idx = -1;
if (!bch2_btree_node_relock(trans, path, l)) {
__bch2_btree_path_unlock(path);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
+ path->btree_id, &path->pos);
btree_trans_restart(trans);
return false;
}
static bool bch2_btree_path_relock(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
- bool ret = btree_path_get_locks(trans, path, false, trace_ip);
+ bool ret = btree_path_get_locks(trans, path, false);
- if (!ret)
+ if (!ret) {
+ trace_trans_restart_relock_path(trans->fn, trace_ip,
+ path->btree_id, &path->pos);
btree_trans_restart(trans);
+ }
return ret;
}
path->locks_want = new_locks_want;
- if (btree_path_get_locks(trans, path, true, _THIS_IP_))
+ if (btree_path_get_locks(trans, path, true))
return true;
/*
linked->btree_id == path->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_path_get_locks(trans, linked, true, _THIS_IP_);
+ btree_path_get_locks(trans, linked, true);
}
return false;
locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > path->locks_want) {
path->locks_want = locks_want;
- btree_path_get_locks(trans, path, true, _THIS_IP_);
+ btree_path_get_locks(trans, path, true);
}
return path;
__bch2_btree_path_unlock(path);
path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
+ trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
+ path->btree_id, &path->pos);
btree_trans_restart(trans);
ret = -EINTR;
goto err;
__entry->ret)
);
/*
 * Fired when an attempt to relock a btree node fails.  Records the
 * transaction function and caller, the btree id and position, the node
 * pointer, and both the iterator's and the node's lock sequence numbers
 * so the sequence mismatch behind the failure can be inspected.
 */
TRACE_EVENT(btree_node_relock_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos,
		 unsigned long node,
		 u32 iter_lock_seq,
		 u32 node_lock_seq),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos, node, iter_lock_seq, node_lock_seq),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 24	)
		__array(char,			caller, 32	)
		__field(u8,			btree_id	)
		__field(u64,			pos_inode	)
		__field(u64,			pos_offset	)
		__field(u32,			pos_snapshot	)
		__field(unsigned long,		node		)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		/*
		 * NOTE(review): strncpy() does not NUL-terminate if trans_fn
		 * is >= 24 bytes — confirm callers always pass shorter names,
		 * or consider a terminating copy.
		 */
		strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		/* Symbolize the caller IP at record time ("%pS"). */
		snprintf(__entry->caller, sizeof(__entry->caller), "%pS", (void *) caller_ip);
		__entry->btree_id		= btree_id;
		__entry->pos_inode		= pos->inode;
		__entry->pos_offset		= pos->offset;
		__entry->pos_snapshot		= pos->snapshot;
		__entry->node			= node;
		__entry->iter_lock_seq		= iter_lock_seq;
		__entry->node_lock_seq		= node_lock_seq;
	),

	TP_printk("%s %s btree %u pos %llu:%llu:%u, node %lu iter seq %u lock seq %u",
		  __entry->trans_fn,
		  __entry->caller,
		  __entry->btree_id,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->node,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);
+
/* Garbage collection */
DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
TP_STRUCT__entry(
__array(char, trans_fn, 24 )
- __field(unsigned long, caller_ip )
+ __array(char, caller, 32 )
__field(u8, btree_id )
__field(u64, pos_inode )
__field(u64, pos_offset )
TP_fast_assign(
strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
- __entry->caller_ip = caller_ip;
+ snprintf(__entry->caller, sizeof(__entry->caller), "%pS", (void *) caller_ip);
__entry->btree_id = btree_id;
__entry->pos_inode = pos->inode;
__entry->pos_offset = pos->offset;
__entry->pos_snapshot = pos->snapshot;
),
- TP_printk("%s %pS btree %u pos %llu:%llu:%u",
+ TP_printk("%s %s btree %u pos %llu:%llu:%u",
__entry->trans_fn,
- (void *) __entry->caller_ip,
+ __entry->caller,
__entry->btree_id,
__entry->pos_inode,
__entry->pos_offset,
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/*
 * Transaction-restart tracepoints, one per relock-failure site (named
 * after the point where the relock failed), all sharing the
 * transaction_restart_iter event class: transaction fn, caller IP,
 * btree id and position.
 */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 enum btree_id btree_id,
		 struct bpos *pos),
	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
+
DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,