static void __maybe_unused
bch2_btree_path_node_to_text(struct printbuf *out,
- struct btree_bkey_cached_common *b,
- bool cached)
+ struct btree_bkey_cached_common *b)
{
struct six_lock_count c = six_lock_counts(&b->lock);
struct task_struct *owner;
prt_printf(out, " l=%u %s:",
b->level, bch2_btree_ids[b->btree_id]);
- bch2_bpos_to_text(out, btree_node_pos(b, cached));
+ bch2_bpos_to_text(out, btree_node_pos(b));
prt_printf(out, " locks %u:%u:%u held by pid %u",
c.n[0], c.n[1], c.n[2], pid);
!IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
prt_printf(out, " %c l=%u ",
lock_types[btree_node_locked_type(path, l)], l);
- bch2_btree_path_node_to_text(out, b, path->cached);
+ bch2_btree_path_node_to_text(out, b);
prt_printf(out, "\n");
}
}
bch2_bpos_to_text(out, trans->locking_pos);
prt_printf(out, " node ");
- bch2_btree_path_node_to_text(out, b, path->cached);
+ bch2_btree_path_node_to_text(out, b);
prt_printf(out, "\n");
}
}
INIT_LIST_HEAD(&ck->list);
__six_lock_init(&ck->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
lockdep_set_novalidate_class(&ck->c.lock);
+ ck->c.cached = true;
BUG_ON(!six_trylock_intent(&ck->c.lock));
BUG_ON(!six_trylock_write(&ck->c.lock));
return ck;
/* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) &&
- bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c,
- linked->cached)) <= 0) {
+ bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c)) <= 0) {
reason = 7;
goto deadlock;
}
struct six_lock lock;
u8 level;
u8 btree_id;
+ bool cached;
};
/*
 * NOTE(review): this excerpt appears elided — struct btree is shown here with
 * a single member, which does not match the full upstream definition.
 * Confirm against the complete header before relying on this layout.
 */
struct btree {
struct bkey_i *k;
};
/*
 * Return the position of a btree node, whether it is a real btree node or a
 * key-cache entry.
 *
 * This hunk drops the explicit 'cached' parameter: the flag now lives in
 * struct btree_bkey_cached_common itself (see the '+ bool cached;' member
 * added elsewhere in this patch), so the function derives it from b->cached.
 * Non-cached nodes resolve to the containing struct btree's key position;
 * cached entries resolve to the containing struct bkey_cached's key.pos.
 */
-static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b,
-					 bool cached)
+static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
-	return !cached
+	return !b->cached
		? container_of(b, struct btree, c)->key.k.p
		: container_of(b, struct bkey_cached, c)->key.pos;
}