}
if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
- iter->uptodate = BTREE_ITER_NEED_PEEK;
+ iter->uptodate = BTREE_ITER_UPTODATE;
bch2_trans_verify_locks(trans);
if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
bch2_btree_node_iter_advance(&l->iter, l->b);
-
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void bch2_btree_iter_fix_key_modified(struct btree_trans *trans,
b, t, k2);
}
}
-
- if (!b->c.level &&
- node_iter == &iter->l[0].iter &&
- iter_current_key_modified)
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void bch2_btree_node_iter_fix(struct btree_trans *trans,
*/
if (level)
bch2_btree_node_iter_peek(&l->iter, l->b);
-
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
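/*
 * Editorial sketch, not part of the patch: with BTREE_ITER_NEED_PEEK gone,
 * an iterator only needs to be traversed (uptodate == BTREE_ITER_UPTODATE)
 * before its per-level node iterator can be peeked directly.  The helper
 * below is hypothetical; it leans on bch2_btree_iter_traverse() and
 * bch2_btree_node_iter_peek_all(), which are used elsewhere in this tree,
 * but the exact signatures assumed here may differ.
 */
static struct bkey_packed *example_peek_leaf(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];

	/* ensures uptodate == BTREE_ITER_UPTODATE, taking locks as needed */
	if (bch2_btree_iter_traverse(iter))
		return NULL;	/* transaction restart; caller retries */

	/* no separate "re-peek" step is required any more */
	return bch2_btree_node_iter_peek_all(&l->iter, l->b);
}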
static inline void btree_iter_level_init(struct btree_trans *trans,
}
}
- iter->uptodate = BTREE_ITER_NEED_PEEK;
+ iter->uptodate = BTREE_ITER_UPTODATE;
out:
BUG_ON((ret == -EINTR) != !!trans->restarted);
trace_iter_traverse(trans->ip, trace_ip,
out:
if (l != iter->level)
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- else
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
bch2_btree_iter_verify(iter);
#ifdef CONFIG_BCACHEFS_DEBUG
return iter->locks_want < new_locks_want
? __bch2_btree_iter_upgrade(trans, iter, new_locks_want)
- : iter->uptodate <= BTREE_ITER_NEED_PEEK;
+ : iter->uptodate == BTREE_ITER_UPTODATE;
}
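/*
 * Editorial note on the comparison change above: BTREE_ITER_NEED_PEEK was the
 * only value between UPTODATE and NEED_RELOCK, so "uptodate <=
 * BTREE_ITER_NEED_PEEK" meant "traversed and locked, key may still need
 * re-peeking".  With that state removed, the only value satisfying the old
 * test is BTREE_ITER_UPTODATE, hence the switch to an equality check.  A
 * hypothetical helper spelling the predicate out (illustrative only, not part
 * of the patch):
 */
static inline bool btree_iter_traversed(const struct btree_iter *iter)
{
	return iter->uptodate == BTREE_ITER_UPTODATE;
}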
void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);
if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
- iter->uptodate = BTREE_ITER_NEED_PEEK;
+ iter->uptodate = BTREE_ITER_UPTODATE;
if ((iter->flags & BTREE_ITER_INTENT) &&
!bch2_btree_iter_upgrade(trans, iter, 1)) {
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
- BTREE_ITER_NEED_PEEK = 1,
- BTREE_ITER_NEED_RELOCK = 2,
- BTREE_ITER_NEED_TRAVERSE = 3,
+ BTREE_ITER_NEED_RELOCK = 1,
+ BTREE_ITER_NEED_TRAVERSE = 2,
};
#define BTREE_ITER_NO_NODE_GET_LOCKS ((struct btree *) 1)
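/*
 * Editorial summary of the three remaining iterator states (interpretation,
 * not text from the patch):
 *
 *   BTREE_ITER_UPTODATE      - traversed and holding the node locks it wants;
 *                              keys can be peeked directly off the per-level
 *                              node iterators
 *   BTREE_ITER_NEED_RELOCK   - position is still valid but locks were
 *                              dropped; relocking restores UPTODATE without a
 *                              full traverse
 *   BTREE_ITER_NEED_TRAVERSE - the path is no longer valid; the iterator must
 *                              be fully re-traversed before use
 */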