/* Btree iterators: */
+/*
+ * Like bch2_btree_iter_traverse(), but traverses the path the iterator
+ * already holds without first repositioning it to the iterator's search
+ * key - iter->path->pos is left unchanged.
+ */
+int __must_check
+__bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+ return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+}
+
int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
iter->path = bch2_path_get(trans,
flags & BTREE_ITER_CACHED,
btree_id,
- btree_iter_search_key(iter),
+ iter->pos,
locks_want,
depth,
flags & BTREE_ITER_INTENT);
void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
bch2_bkey_merge(c, bkey_i_to_s(insert), k);
out:
if (!bkey_deleted(&insert->k)) {
- bch2_btree_iter_set_pos(&iter, insert->k.p);
+ /*
+ * Rewinding iterators is expensive: instead, get a new one; the
+ * path pointing to the start of insert will be cloned from it:
+ */
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_init(trans, &iter, btree_id, insert->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, insert, flags);
}
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
int ret;
+ /*
+ * This traverses the iterator without changing iter->path->pos to
+ * search_key() (which is pos + 1 for extents): we want there to be a
+ * path already traversed at iter->pos because
+ * bch2_trans_extent_update() will use it to attempt extent merging
+ */
+ ret = __bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
ret = bch2_extent_trim_atomic(trans, iter, k);
if (ret)
return ret;