(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
!btree_type_has_snapshots(iter->btree_id));
+ if (iter->update_path)
+ bch2_btree_path_verify(trans, iter->update_path);
bch2_btree_path_verify(trans, iter->path);
}
goto out;
}
}
-
- iter->path = btree_path_set_pos(trans, iter->path, k.k->p,
- iter->flags & BTREE_ITER_INTENT);
- BUG_ON(!iter->path->nodes_locked);
out:
- iter->path->should_be_locked = true;
-
bch2_btree_iter_verify(iter);
return k;
struct bkey_s_c k;
int ret;
+ if (iter->update_path) {
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+ }
+
bch2_btree_iter_verify_entry_exit(iter);
while (1) {
if (!k.k || bkey_err(k))
goto out;
+ if (iter->update_path &&
+ bkey_cmp(iter->update_path->pos, k.k->p)) {
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+ }
+
+ if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ (iter->flags & BTREE_ITER_INTENT) &&
+ !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ !iter->update_path) {
+ struct bpos pos = k.k->p;
+
+ if (pos.snapshot < iter->snapshot) {
+ search_key = bpos_successor(k.k->p);
+ continue;
+ }
+
+ pos.snapshot = iter->snapshot;
+
+ /*
+ * advance, same as on exit for iter->path, but only up
+ * to snapshot
+ */
+ __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = iter->path;
+
+ iter->update_path = btree_path_set_pos(trans,
+ iter->update_path, pos,
+ iter->flags & BTREE_ITER_INTENT);
+
+ BUG_ON(!(iter->update_path->nodes_locked & 1));
+ iter->update_path->should_be_locked = true;
+ }
+
/*
* We can never have a key in a leaf node at POS_MAX, so
* we don't have to check these successor() calls:
iter->pos = k.k->p;
else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
iter->pos = bkey_start_pos(k.k);
+
+ iter->path = btree_path_set_pos(trans, iter->path, k.k->p,
+ iter->flags & BTREE_ITER_INTENT);
+ BUG_ON(!iter->path->nodes_locked);
out:
+ if (iter->update_path) {
+ BUG_ON(!(iter->update_path->nodes_locked & 1));
+ iter->update_path->should_be_locked = true;
+ }
+ iter->path->should_be_locked = true;
+
if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
iter->pos.snapshot = iter->snapshot;
if (iter->path)
bch2_path_put(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
+ if (iter->update_path)
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
iter->path = NULL;
+ iter->update_path = NULL;
}
static void __bch2_trans_iter_init(struct btree_trans *trans,
iter->trans = trans;
iter->path = NULL;
+ iter->update_path = NULL;
iter->btree_id = btree_id;
iter->min_depth = depth;
iter->flags = flags;
*dst = *src;
if (src->path)
__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
+ if (src->update_path)
+ __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
+ if (unlikely(iter->update_path))
+ bch2_path_put(iter->trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+
if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
new_pos.snapshot = iter->snapshot;
struct btree_iter {
struct btree_trans *trans;
struct btree_path *path;
+ struct btree_path *update_path;
enum btree_id btree_id:4;
unsigned min_depth:4;
int bch2_btree_node_update_key_get_iter(struct btree_trans *,
struct btree *, struct bkey_i *, bool);
+int bch2_trans_update_extent(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, enum btree_update_flags);
+
+int __must_check bch2_trans_update_by_path(struct btree_trans *, struct btree_path *,
+ struct bkey_i *, enum btree_update_flags);
int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, enum btree_update_flags);
+
void bch2_trans_commit_hook(struct btree_trans *,
struct btree_trans_commit_hook *);
int __bch2_trans_commit(struct btree_trans *);
return 0;
}
-static int bch2_trans_update_extent(struct btree_trans *trans,
- struct btree_iter *orig_iter,
- struct bkey_i *insert,
- enum btree_update_flags flags)
+int bch2_trans_update_extent(struct btree_trans *trans,
+ struct btree_iter *orig_iter,
+ struct bkey_i *insert,
+ enum btree_update_flags flags)
{
struct btree_iter iter, update_iter;
struct bpos start = bkey_start_pos(&insert->k);
bkey_reassemble(update, k);
bch2_cut_front(insert->k.p, update);
- bch2_trans_copy_iter(&update_iter, &iter);
- update_iter.pos = update->k.p;
- ret = bch2_trans_update(trans, &update_iter, update,
+ ret = bch2_trans_update_by_path(trans, iter.path, update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
flags);
- bch2_trans_iter_exit(trans, &update_iter);
-
if (ret)
goto err;
goto out;
return ret;
}
-int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
+int __must_check bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
struct bkey_i *k, enum btree_update_flags flags)
{
struct btree_insert_entry *i, n;
- BUG_ON(!iter->path->should_be_locked);
-
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
- return bch2_trans_update_extent(trans, iter, k, flags);
+ BUG_ON(!path->should_be_locked);
BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
- BUG_ON(bpos_cmp(k->k.p, iter->path->pos));
+ BUG_ON(bpos_cmp(k->k.p, path->pos));
n = (struct btree_insert_entry) {
.flags = flags,
- .bkey_type = __btree_node_type(iter->path->level, iter->btree_id),
- .btree_id = iter->btree_id,
- .level = iter->path->level,
- .cached = iter->flags & BTREE_ITER_CACHED,
- .path = iter->path,
+ .bkey_type = __btree_node_type(path->level, path->btree_id),
+ .btree_id = path->btree_id,
+ .level = path->level,
+ .cached = path->cached,
+ .path = path,
.k = k,
.ip_allocated = _RET_IP_,
};
btree_insert_entry_cmp(i - 1, i) >= 0);
#endif
- if (bkey_deleted(&n.k->k) &&
- (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
- int ret = need_whiteout_for_snapshot(trans, n.btree_id, n.k->k.p);
- if (unlikely(ret < 0))
- return ret;
-
- if (ret)
- n.k->k.type = KEY_TYPE_whiteout;
- }
-
/*
* Pending updates are kept sorted: first, find position of new update,
* then delete/trim any updates the new update overwrites:
i - trans->updates, n);
__btree_path_get(n.path, true);
-
return 0;
}
+/*
+ * bch2_trans_update(): queue an update for @k in the current btree
+ * transaction, to be written out at commit time.
+ *
+ * Extent btrees are routed to bch2_trans_update_extent(), which handles
+ * overwrite/trim of existing extents.  Otherwise the insert entry is queued
+ * via bch2_trans_update_by_path(), preferring iter->update_path when set
+ * (per the peek hunks above, update_path is created under
+ * BTREE_ITER_FILTER_SNAPSHOTS|BTREE_ITER_INTENT and positioned at the
+ * iterator's own snapshot) and falling back to iter->path.
+ */
+int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
+			  struct bkey_i *k, enum btree_update_flags flags)
+{
+	if (iter->flags & BTREE_ITER_IS_EXTENTS)
+		return bch2_trans_update_extent(trans, iter, k, flags);
+
+	/*
+	 * Deletion in a snapshot-filtered btree: ask
+	 * need_whiteout_for_snapshot() whether this deletion must become a
+	 * whiteout key instead of a plain deleted key — presumably so the
+	 * deletion remains visible over keys in other snapshots (NOTE(review):
+	 * confirm against need_whiteout_for_snapshot()'s definition, not
+	 * visible in this hunk).
+	 */
+	if (bkey_deleted(&k->k) &&
+	    (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
+		int ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
+		if (unlikely(ret < 0))
+			return ret;
+
+		if (ret)
+			k->k.type = KEY_TYPE_whiteout;
+	}
+
+	return bch2_trans_update_by_path(trans, iter->update_path ?: iter->path,
+				       k, flags);
+}
+
void bch2_trans_commit_hook(struct btree_trans *trans,
struct btree_trans_commit_hook *h)
{