{
	struct btree *b = path->l[level].b;
-	EBUG_ON(btree_lock_want(path, level) != BTREE_NODE_INTENT_LOCKED);
-
	if (!is_btree_node(path, level))
		return false;
+	switch (btree_lock_want(path, level)) {
+	case BTREE_NODE_UNLOCKED:
+		BUG_ON(btree_node_locked(path, level));
+		return true;
+	case BTREE_NODE_READ_LOCKED:
+		BUG_ON(btree_node_intent_locked(path, level));
+		return bch2_btree_node_relock(trans, path, level);
+	case BTREE_NODE_INTENT_LOCKED:
+		break;
+	}
+
	if (btree_node_intent_locked(path, level))
		return true;
	unsigned l;
	if (!path->nodes_locked) {
-		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE);
+		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+		       btree_path_node(path, path->level));
		return;
	}
		EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
-		if (path->nodes_locked)
+		if (path->nodes_locked ||
+		    !btree_path_node(path, path->level))
			i++;
}
{
	struct bch_fs *c = trans->c;
	struct btree *new_hash = NULL;
+	struct btree_path *path = iter->path;
	struct closure cl;
	int ret = 0;
+	if (!btree_node_intent_locked(path, b->c.level) &&
+	    !bch2_btree_path_upgrade(trans, path, b->c.level + 1)) {
+		btree_trans_restart(trans);
+		return -EINTR;
+	}
+
	closure_init_stack(&cl);
	/*
		new_hash = bch2_btree_node_mem_alloc(c);
	}
+	path->intent_ref++;
	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
					   new_key, skip_triggers);
+	--path->intent_ref;
	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);