ret = bch2_btree_node_get(c, iter, &tmp.k, level,
SIX_LOCK_intent);
- if (PTR_ERR_OR_ZERO(ret) == -EINTR &&
- !(iter->flags & BTREE_ITER_NOUNLOCK)) {
+ if (PTR_ERR_OR_ZERO(ret) == -EINTR && !trans->nounlock) {
struct btree_iter *linked;
if (!bch2_btree_node_relock(iter, level + 1))
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- if (!(iter->flags & BTREE_ITER_NOUNLOCK)) {
+ if (!(iter->trans->nounlock)) {
linked->locks_want = max_t(unsigned,
linked->locks_want,
__fls(linked->nodes_locked) + 1);
*/
if (linked->btree_id == iter->btree_id &&
level > __fls(linked->nodes_locked)) {
- if (!(iter->flags & BTREE_ITER_NOUNLOCK)) {
+ if (!(iter->trans->nounlock)) {
linked->locks_want =
max(level + 1, max_t(unsigned,
linked->locks_want,
{
unsigned l;
- BUG_ON((iter->flags & BTREE_ITER_NOUNLOCK) &&
- !btree_node_locked(iter, 0));
-
for (l = 0; btree_iter_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l))
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
return iter->locks_want < new_locks_want
- ? (!(iter->flags & BTREE_ITER_NOUNLOCK)
+ ? (!iter->trans->nounlock
? __bch2_btree_iter_upgrade(iter, new_locks_want)
: __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
: iter->uptodate <= BTREE_ITER_NEED_PEEK;
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
+	/*
+	 * Dropping the leaf-node lock (level 0) while trans->nounlock is set
+	 * would break the caller's no-unlock guarantee during commit;
+	 * EBUG_ON compiles out in non-debug builds (BUG_ON did not) —
+	 * NOTE(review): presumably this check is debug-only by design; confirm.
+	 */
-	BUG_ON(!level && iter->flags & BTREE_ITER_NOUNLOCK);
+	EBUG_ON(!level && iter->trans->nounlock);

	__btree_node_unlock(iter, level);
}
*/
#define BTREE_ITER_IS_EXTENTS (1 << 4)
#define BTREE_ITER_ERROR (1 << 5)
-#define BTREE_ITER_NOUNLOCK (1 << 6)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
u8 size;
unsigned used_mempool:1;
unsigned error:1;
+ unsigned nounlock:1;
unsigned mem_top;
unsigned mem_bytes;
struct bch_fs *c = trans->c;
struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
- struct btree_iter *linked;
int ret;
if (likely(!(trans->flags & BTREE_INSERT_NO_CLEAR_REPLICAS))) {
i->k->k.version = MAX_VERSION;
}
- if (trans->flags & BTREE_INSERT_NOUNLOCK) {
- /*
- * linked iterators that weren't being updated may or may not
- * have been traversed/locked, depending on what the caller was
- * doing:
- */
- trans_for_each_iter(trans, linked)
- if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
- linked->flags |= BTREE_ITER_NOUNLOCK;
- }
-
trans_for_each_update_iter(trans, i)
if (update_has_triggers(trans, i) &&
!update_triggers_transactional(trans, i))
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
- struct btree_iter *linked;
int ret;
trans_for_each_update_iter(trans, i) {
if (unlikely(ret))
goto err;
+ if (trans->flags & BTREE_INSERT_NOUNLOCK)
+ trans->nounlock = true;
+
trans_for_each_update_leaf(trans, i)
bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);
+ trans->nounlock = false;
+
trans_for_each_update_iter(trans, i)
bch2_btree_iter_downgrade(i->iter);
err:
/* make sure we didn't drop or screw up locks: */
bch2_btree_trans_verify_locks(trans);
- trans_for_each_iter(trans, linked)
- linked->flags &= ~BTREE_ITER_NOUNLOCK;
-
return ret;
}