goto out;
} else {
lock_node:
- ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read);
+ ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
return ERR_PTR(ret);
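
Every caller of btree_node_lock_nopath() now has to say which instruction pointer the acquisition should be attributed to. Here _THIS_IP_ records this call site itself; _THIS_IP_ and _RET_IP_ are the standard kernel macros (see the sketch after the write-lock hunk below).
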
static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
return iter->ip_allocated;
#else
return 0;
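
First of the ifdef swaps: the recorded IP is no longer tied to CONFIG_BCACHEFS_DEBUG alone but to TRACK_PATH_ALLOCATED, a new define (introduced in btree_types.h further down) that is set when either CONFIG_BCACHEFS_DEBUG or CONFIG_BCACHEFS_LOCK_TIME_STATS is enabled, since the lock time stats now consume the recorded IP as well.
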
bch2_bpos_to_text(out, path->pos);
prt_printf(out, " locks %u", path->nodes_locked);
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
prt_newline(out);
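
Storing a bare unsigned long is enough for readable output because printk's %pS renders an instruction pointer as a symbolized function+offset, so the path debug output now shows where each path was allocated.
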
path->nodes_locked = 0;
for (i = 0; i < ARRAY_SIZE(path->l); i++)
path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
path->ip_allocated = ip;
#endif
trans->paths_sorted = false;
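
This is the point where ip_allocated is actually stamped, at btree_path allocation; it is the value that btree_path_ip_allocated() (added below) later feeds to the locking code, so lock waits can be traced back to whoever created the path.
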
if (ck) {
int ret;
- ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent);
+ ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
if (unlikely(ret)) {
bkey_cached_move_to_freelist(bc, ck);
return ERR_PTR(ret);
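
The key cache allocates and locks cached nodes outside of any btree_path, so there is no allocation site to forward; the helper's own location via _THIS_IP_ is the best available attribution.
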
* locked:
*/
six_lock_readers_add(&b->lock, -readers);
- ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, lock_may_not_fail);
+ ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
+ lock_may_not_fail, _RET_IP_);
six_lock_readers_add(&b->lock, readers);

if (ret)
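
Note _RET_IP_ rather than _THIS_IP_ here: the recorded address is the caller of the write-lock helper, not the helper itself. For reference, a minimal userspace sketch of how the two macros behave; the macro definitions match the kernel's, while helper() is a made-up demo function:

	#include <stdio.h>

	/* Same definitions as the kernel's: */
	#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
	#define _RET_IP_  ((unsigned long)__builtin_return_address(0))

	/* noinline so there is a real call frame to take the return address of */
	static __attribute__((noinline)) unsigned long helper(void)
	{
		return _RET_IP_;	/* resolves to an address in the caller */
	}

	int main(void)
	{
		printf("helper's caller: 0x%lx\n", helper());
		printf("this statement:  0x%lx\n", _THIS_IP_);	/* address of this line */
		return 0;
	}
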
static inline int __btree_node_lock_nopath(struct btree_trans *trans,
struct btree_bkey_cached_common *b,
enum six_lock_type type,
- bool lock_may_not_fail)
+ bool lock_may_not_fail,
+ unsigned long ip)
{
int ret;

trans->lock_must_abort = false;
trans->locking = b;
- ret = six_lock_type_waiter(&b->lock, type, &trans->locking_wait,
- bch2_six_check_for_deadlock, trans);
+ ret = six_lock_type_ip_waiter(&b->lock, type, &trans->locking_wait,
+ bch2_six_check_for_deadlock, trans, ip);
WRITE_ONCE(trans->locking, NULL);
WRITE_ONCE(trans->locking_wait.start_time, 0);
return ret;
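
This is the core plumbing: the IP is threaded through to the six lock layer, which is what actually accounts wait times. Judging from the call site, the new entry point presumably looks something like the following (a reconstruction from the call above, not copied from six.h):

	int six_lock_type_ip_waiter(struct six_lock *lock, enum six_lock_type type,
				    struct six_lock_waiter *wait,
				    six_lock_should_sleep_fn should_sleep_fn, void *p,
				    unsigned long ip);

i.e. six_lock_type_waiter() plus a trailing ip argument.
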
static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
struct btree_bkey_cached_common *b,
- enum six_lock_type type)
+ enum six_lock_type type,
+ unsigned long ip)
{
- return __btree_node_lock_nopath(trans, b, type, false);
+ return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
struct btree_bkey_cached_common *b,
enum six_lock_type type)
{
- int ret = __btree_node_lock_nopath(trans, b, type, true);
+ int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);
BUG_ON(ret);
}
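
btree_node_lock_nopath_nofail() asserts success (BUG_ON) and has no path context, so it records _THIS_IP_; since the helper is static inline, that address should normally land inside the caller after inlining.
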
if (likely(six_trylock_type(&b->lock, type)) ||
btree_node_lock_increment(trans, b, level, type) ||
- !(ret = btree_node_lock_nopath(trans, b, type))) {
+ !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->level].lock_taken_time = local_clock();
#endif
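
And here the recorded allocation site finally meets the lock: waits in btree_node_lock() are attributed to btree_path_ip_allocated(path), i.e. to the code that created the path rather than to the locking helper, which is what makes the CONFIG_BCACHEFS_LOCK_TIME_STATS accounting per call site meaningful.
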
BTREE_ITER_NEED_TRAVERSE = 2,
};
+#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
+#define TRACK_PATH_ALLOCATED
+#endif
+
struct btree_path {
u8 idx;
u8 sorted_idx;
u64 lock_taken_time;
#endif
} l[BTREE_MAX_DEPTH];
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
unsigned long ip_allocated;
#endif
};
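
TRACK_PATH_ALLOCATED is the single switch for all the ip_allocated fields: previously they existed only under CONFIG_BCACHEFS_DEBUG, but the lock time stats need them too, hence the OR of the two options. With neither set, the fields compile away entirely.
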
return path->l + path->level;
}
+static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
+{
+#ifdef TRACK_PATH_ALLOCATED
+ return path->ip_allocated;
+#else
+ return _THIS_IP_;
+#endif
+}
+
/*
* @pos - iterator's current position
* @level - current btree depth
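
Note the fallback in btree_path_ip_allocated(): without TRACK_PATH_ALLOCATED there is no stored field, so the helper returns _THIS_IP_, degrading attribution to the immediate call site; callers like btree_node_lock() stay free of #ifdefs either way.
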
/* BTREE_ITER_WITH_JOURNAL: */
size_t journal_idx;
struct bpos journal_pos;
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
unsigned long ip_allocated;
#endif
};
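
Taken together: a path's ip_allocated is stamped at allocation, btree_node_lock() forwards it through btree_path_ip_allocated(), __btree_node_lock_nopath() hands it to six_lock_type_ip_waiter(), and the debug output prints it with %pS. An illustrative (made-up) line of the path-to-text output after this change:

	... locks 2 bch2_btree_iter_peek+0x43/0x260

with the trailing symbol being the %pS rendering of ip_allocated.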