}
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
-				 unsigned downgrade_to)
+				 unsigned new_locks_want)
{
-	unsigned l, new_locks_want = downgrade_to ?:
-		(iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+	unsigned l;

-	if (iter->locks_want < downgrade_to) {
-		iter->locks_want = new_locks_want;
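+	/* Only a downgrade: new_locks_want can't exceed locks_want: */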
+	EBUG_ON(iter->locks_want < new_locks_want);

-		while (iter->nodes_locked &&
-		       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
-			if (l > iter->level) {
-				btree_node_unlock(iter, l);
-			} else {
-				if (btree_node_intent_locked(iter, l)) {
-					six_lock_downgrade(&iter->l[l].b->c.lock);
-					iter->nodes_intent_locked ^= 1 << l;
-				}
-				break;
+	iter->locks_want = new_locks_want;
+
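+	/*
+	 * Walk locked nodes from the top down: levels at or above the new
+	 * locks_want are unlocked entirely, except the iterator's own level,
+	 * which is only downgraded from an intent lock to a read lock:
+	 */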
+	while (iter->nodes_locked &&
+	       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
+		if (l > iter->level) {
+			btree_node_unlock(iter, l);
+		} else {
+			if (btree_node_intent_locked(iter, l)) {
+				six_lock_downgrade(&iter->l[l].b->c.lock);
+				iter->nodes_intent_locked ^= 1 << l;
			}
+			break;
		}
	}
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 enum btree_id btree_id, struct bpos pos,
+					 unsigned locks_want,
+					 unsigned depth,
					 unsigned flags)
{
	struct btree_iter *iter, *best = NULL;
	pos.snapshot = btree_type_has_snapshots(btree_id)
		? U32_MAX : 0;
-	/* We always want a fresh iterator for node iterators: */
-	if ((flags & BTREE_ITER_TYPE) == BTREE_ITER_NODES)
-		goto alloc_iter;
-
	trans_for_each_iter(trans, iter) {
		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
			continue;
		best = iter;
	}
-alloc_iter:
+
	if (!best) {
		iter = btree_trans_iter_alloc(trans);
		bch2_btree_iter_init(trans, iter, btree_id);
	iter->snapshot = pos.snapshot;

-	if (!(iter->flags & BTREE_ITER_INTENT))
-		bch2_btree_iter_downgrade(iter);
-	else if (!iter->locks_want) {
-		iter->locks_want = 1;
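+	/*
+	 * Clamp to BTREE_MAX_DEPTH, then upgrade or downgrade so that we
+	 * want exactly locks_want intent-locked levels:
+	 */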
+	locks_want = min(locks_want, BTREE_MAX_DEPTH);
+
+	if (locks_want > iter->locks_want) {
+		iter->locks_want = locks_want;
		btree_iter_get_locks(iter, true, false);
+	} else if (locks_want < iter->locks_want) {
+		__bch2_btree_iter_downgrade(iter, locks_want);
	}

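+	/*
+	 * Match the requested depth: levels below the requested depth are
+	 * unlocked and marked uninitialized; if the iterator is above that
+	 * depth, walk back down, marking those levels uninitialized as well:
+	 */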
+	while (iter->level < depth) {
+		btree_node_unlock(iter, iter->level);
+		iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
+		iter->level++;
+	}
+
+	while (iter->level > depth)
+		iter->l[--iter->level].b = BTREE_ITER_NO_NODE_INIT;
+
+	iter->min_depth = depth;
+
	bch2_btree_iter_set_pos(iter, pos);
	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
{
	struct btree_iter *iter =
		__bch2_trans_get_iter(trans, btree_id, pos,
-				      BTREE_ITER_NODES|
-				      BTREE_ITER_NOT_EXTENTS|
-				      BTREE_ITER_ALL_SNAPSHOTS|
-				      flags);
-	unsigned i;
+				      locks_want, depth,
+				      BTREE_ITER_NODES|
+				      BTREE_ITER_NOT_EXTENTS|
+				      BTREE_ITER_ALL_SNAPSHOTS|
+				      flags);

	BUG_ON(bkey_cmp(iter->pos, pos));
-
-	iter->locks_want = locks_want;
-	iter->level = depth;
-	iter->min_depth = depth;
-
-	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
-		iter->l[i].b = NULL;
-	iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
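+	/*
+	 * locks_want and depth are handled by __bch2_trans_get_iter() now;
+	 * these are just sanity checks:
+	 */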
+	BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
+	BUG_ON(iter->level != depth);
+	BUG_ON(iter->min_depth != depth);

	iter->ip_allocated = _RET_IP_;
	return iter;
	bch2_bpos_to_text(out, btree_node_pos(_b, type));
}

+#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
{
	struct btree_iter *iter;
			return true;
	return false;
}
+#endif
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
{
-	if (iter->locks_want > (iter->flags & BTREE_ITER_INTENT) ? 1 : 0)
-		__bch2_btree_iter_downgrade(iter, 0);
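+	/* An intent iterator still wants one intent-locked level: */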
+	unsigned new_locks_want = (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+
+	if (iter->locks_want > new_locks_want)
+		__bch2_btree_iter_downgrade(iter, new_locks_want);
}
void bch2_trans_downgrade(struct btree_trans *);
void bch2_trans_unlink_iters(struct btree_trans *);
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
-					 struct bpos, unsigned);
+					 struct bpos, unsigned,
+					 unsigned, unsigned);
static inline struct btree_iter *
bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
		    struct bpos pos, unsigned flags)
{
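+	/*
+	 * Leaf iterator: depth 0, and one intent-locked level iff
+	 * BTREE_ITER_INTENT is set:
+	 */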
	struct btree_iter *iter =
-		__bch2_trans_get_iter(trans, btree_id, pos, flags);
+		__bch2_trans_get_iter(trans, btree_id, pos,
+				      (flags & BTREE_ITER_INTENT) != 0, 0,
+				      flags);

	iter->ip_allocated = _THIS_IP_;
	return iter;
}