BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
- BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
- iter->pos.snapshot != iter->snapshot);
-
BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
return k;
}
-/**
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
- */
-struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
struct btree_trans *trans = iter->trans;
- struct bpos search_key = btree_iter_search_key(iter);
struct bkey_i *next_update;
struct bkey_s_c k;
int ret;
EBUG_ON(iter->path->cached || iter->path->level);
bch2_btree_iter_verify(iter);
- bch2_btree_iter_verify_entry_exit(iter);
while (1) {
iter->path = btree_path_set_pos(trans, iter->path, search_key,
}
if (likely(k.k)) {
- /*
- * We can never have a key in a leaf node at POS_MAX, so
- * we don't have to check these successor() calls:
- */
- if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
- !bch2_snapshot_is_ancestor(trans->c,
- iter->snapshot,
- k.k->p.snapshot)) {
- search_key = bpos_successor(k.k->p);
- continue;
- }
-
- if (bkey_whiteout(k.k) &&
- !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
- search_key = bkey_successor(iter, k.k->p);
- continue;
- }
-
break;
} else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
/* Advance to next leaf node: */
}
}
+ iter->path = btree_path_set_pos(trans, iter->path, k.k->p,
+ iter->flags & BTREE_ITER_INTENT);
+ BUG_ON(!iter->path->nodes_locked);
+out:
+ iter->path->should_be_locked = true;
+
+ bch2_btree_iter_verify(iter);
+
+ return k;
+}
+
+/**
+ * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
+ * current position
+ */
+struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key = btree_iter_search_key(iter);
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_btree_iter_verify_entry_exit(iter);
+
+ while (1) {
+ k = __bch2_btree_iter_peek(iter, search_key);
+ if (!k.k || bkey_err(k))
+ goto out;
+
+ /*
+ * We can never have a key in a leaf node at POS_MAX, so
+ * we don't have to check these successor() calls:
+ */
+ if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ !bch2_snapshot_is_ancestor(trans->c,
+ iter->snapshot,
+ k.k->p.snapshot)) {
+ search_key = bpos_successor(k.k->p);
+ continue;
+ }
+
+ if (bkey_whiteout(k.k) &&
+ !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ search_key = bkey_successor(iter, k.k->p);
+ continue;
+ }
+
+ break;
+ }
+
/*
	 * iter->pos should be monotonically increasing, and always be equal to
* the key we just returned - except extents can straddle iter->pos:
iter->pos = k.k->p;
else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
iter->pos = bkey_start_pos(k.k);
-
- if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+out:
+ if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
iter->pos.snapshot = iter->snapshot;
- iter->path = btree_path_set_pos(trans, iter->path, k.k->p,
- iter->flags & BTREE_ITER_INTENT);
- BUG_ON(!iter->path->nodes_locked);
-out:
- iter->path->should_be_locked = true;
+ ret = bch2_btree_iter_verify_ret(iter, k);
+ if (unlikely(ret)) {
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ }
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
- ret = bch2_btree_iter_verify_ret(iter, k);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
return k;
}
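/*
 * Example (not part of the patch): a minimal sketch of driving
 * bch2_btree_iter_peek() by hand, retrying on -EINTR transaction restarts
 * the same way the peek-and-restart loop further down does. The names
 * c (struct bch_fs *), BTREE_ID_extents, POS_MIN, bch2_trans_init(),
 * bch2_trans_iter_exit() and bch2_trans_exit() are assumed from the
 * surrounding tree; most callers use the for_each_btree_key*() macros
 * instead of open-coding this.
 */
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);

	while (1) {
		k = bch2_btree_iter_peek(&iter);
		ret = bkey_err(k);
		if (ret == -EINTR) {
			/* lock restart: begin again and re-peek from iter's position */
			bch2_trans_begin(&trans);
			continue;
		}
		if (ret || !k.k)
			break;

		/* ... use k ... */

		bch2_btree_iter_advance(&iter);
	}

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);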
bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);
-static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
- if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
- new_pos.snapshot = iter->snapshot;
-
iter->k.type = KEY_TYPE_deleted;
iter->k.p.inode = iter->pos.inode = new_pos.inode;
iter->k.p.offset = iter->pos.offset = new_pos.offset;
iter->k.size = 0;
}
+static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+{
+ if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ new_pos.snapshot = iter->snapshot;
+
+ __bch2_btree_iter_set_pos(iter, new_pos);
+}
+
static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
return PTR_ERR_OR_ZERO(k.k);
}
-static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
unsigned flags)
{
return flags & BTREE_ITER_SLOTS
struct bkey_s_c k;
while (btree_trans_too_many_iters(trans) ||
- (k = __bch2_btree_iter_peek(iter, flags),
+ (k = bch2_btree_iter_peek_type(iter, flags),
bkey_err(k) == -EINTR))
bch2_trans_begin(trans);
_start, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
- (_k) = __bch2_btree_iter_peek(&(_iter), _flags), \
+ (_k) = bch2_btree_iter_peek_type(&(_iter), _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
for (; \
- (_k) = __bch2_btree_iter_peek(&(_iter), _flags), \
+ (_k) = bch2_btree_iter_peek_type(&(_iter), _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
bch2_btree_iter_advance(&(_iter)))
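/*
 * Example (not part of the patch): typical use of the renamed
 * bch2_btree_iter_peek_type() via for_each_btree_key_continue_norestart().
 * iter, k and ret are assumed to be declared by the caller as above; the
 * _norestart variants do not handle transaction restarts, so the caller
 * is expected to check ret for -EINTR and retry.
 */
	for_each_btree_key_continue_norestart(iter, 0, k, ret) {
		/* ... process k; any bkey_err(), including -EINTR, ends the loop with ret set ... */
	}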