bch2_btree_update_done(as);
for (l = iter->level + 1; btree_iter_node(iter, l) && !ret; l++)
- bch2_foreground_maybe_merge(c, iter, l, flags);
+ ret = bch2_foreground_maybe_merge(c, iter, l, flags);
return ret;
}
-void __bch2_foreground_maybe_merge(struct bch_fs *c,
- struct btree_iter *iter,
- unsigned level,
- unsigned flags,
- enum btree_node_sibling sib)
+int __bch2_foreground_maybe_merge(struct bch_fs *c,
+ struct btree_iter *iter,
+ unsigned level,
+ unsigned flags,
+ enum btree_node_sibling sib)
{
struct btree_trans *trans = iter->trans;
struct btree_iter *sib_iter = NULL;
struct btree *b, *m, *n, *prev, *next, *parent;
struct bpos sib_pos;
size_t sib_u64s;
- int ret = 0;
-
- if (trans->nounlock)
- return;
+ int ret = 0, ret2 = 0;
BUG_ON(!btree_node_locked(iter, level));
retry:
* split path, and downgrading to read locks in there is potentially
* confusing:
*/
- return;
+ return ret ?: ret2;
err:
bch2_trans_iter_put(trans, sib_iter);
sib_iter = NULL;
- if (ret == -EINTR && bch2_trans_relock(trans)) {
- ret = 0;
+ if (ret == -EINTR && bch2_trans_relock(trans))
goto retry;
- }
if (ret == -EINTR && !(flags & BTREE_INSERT_NOUNLOCK)) {
+ ret2 = ret;
ret = bch2_btree_iter_traverse_all(trans);
if (!ret)
goto retry;
unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
-void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
- unsigned, unsigned, enum btree_node_sibling);
+int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
+ unsigned, unsigned, enum btree_node_sibling);
-static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
+static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
struct btree_iter *iter,
unsigned level, unsigned flags,
enum btree_node_sibling sib)
{
struct btree *b;
if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
- return;
+ return 0;
if (!bch2_btree_node_relock(iter, level))
- return;
+ return 0;
b = iter->l[level].b;
if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
- return;
+ return 0;
- __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+ return __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
}
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
+static inline int bch2_foreground_maybe_merge(struct bch_fs *c,
struct btree_iter *iter,
unsigned level,
unsigned flags)
{
- bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
- btree_prev_sib);
- bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
- btree_next_sib);
+ return bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+ btree_prev_sib) ?:
+ bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+ btree_next_sib);
}
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
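
Aside on the `?:` used in the new return paths (`return ret ?: ret2;` and the chained sibling merges in bch2_foreground_maybe_merge()): this is the GNU C conditional with an omitted middle operand, where `a ?: b` evaluates to `a` if `a` is nonzero and to `b` otherwise, with `a` evaluated only once. The standalone sketch below is illustrative only and not part of the patch; the helper name `first_error()` is invented for the example.

#include <errno.h>
#include <stdio.h>

/*
 * GNU C extension: "x ?: y" is shorthand for "x ? x : y", except that x
 * is evaluated only once.  Here it returns the first nonzero error code.
 */
static int first_error(int a, int b)
{
        return a ?: b;
}

int main(void)
{
        /* a is zero, so the fallback b is returned: prints -4 (-EINTR) */
        printf("%d\n", first_error(0, -EINTR));
        /* a is nonzero, so it takes priority: prints -12 (-ENOMEM) */
        printf("%d\n", first_error(-ENOMEM, -EINTR));
        return 0;
}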