bcachefs: Ensure intent locks are marked before taking write locks
author Kent Overstreet <kent.overstreet@linux.dev>
Sun, 4 Sep 2022 02:24:16 +0000 (22:24 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:40 +0000 (17:09 -0400)
Locks must be correctly marked for the cycle detector to work.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
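
For context (an illustrative note, not part of the patch): the cycle detector walks the locks that are marked on each transaction's btree_paths, so a lock that is held but never marked there is invisible to it, and a deadlock cycle running through that lock goes undetected. The new allocation path therefore records the intent lock on path level 0 before blocking on the write lock. The sketch below condenses the bkey_cached_alloc() hunk, using only identifiers that appear in the diff; it is not a standalone program.

	/*
	 * Record the intent lock on the path *before* taking the write lock,
	 * so the cycle detector can see it while we block:
	 */
	path->l[0].b        = (void *) ck;		/* point path level 0 at the new key cache entry */
	path->l[0].lock_seq = ck->c.lock.state.seq;	/* remember the six lock sequence number */
	mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);

	ret = bch2_btree_node_lock_write(trans, path, &ck->c);
	if (unlikely(ret)) {
		/*
		 * btree_node_unlock() drops the six lock *and* clears the mark;
		 * the old six_unlock_intent() left the path state untouched.
		 */
		btree_node_unlock(trans, path, 0);
		bkey_cached_move_to_freelist(bc, ck);
		return ERR_PTR(ret);
	}

The two one-line hunks in btree_key_cache_create() appear to apply the same rule to the other paths: mark level 0 intent-locked when an existing entry is reused, and mark it unlocked again on the path that frees the entry and returns NULL.
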
fs/bcachefs/btree_key_cache.c

index 0f54db0c1b8ae894587ddda89b1dffe84b7a5473..977c523359a56b634e8deb4bf87f690c830fd2f3 100644
@@ -214,9 +214,13 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
                        return ERR_PTR(ret);
                }
 
-               ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_write);
+               path->l[0].b = (void *) ck;
+               path->l[0].lock_seq = ck->c.lock.state.seq;
+               mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
+
+               ret = bch2_btree_node_lock_write(trans, path, &ck->c);
                if (unlikely(ret)) {
-                       six_unlock_intent(&ck->c.lock);
+                       btree_node_unlock(trans, path, 0);
                        bkey_cached_move_to_freelist(bc, ck);
                        return ERR_PTR(ret);
                }
@@ -285,6 +289,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                        return ERR_PTR(-ENOMEM);
                }
 
+               mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
                was_new = false;
        } else {
                if (path->btree_id == BTREE_ID_subvolumes)
@@ -311,6 +316,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                        bkey_cached_free_fast(bc, ck);
                }
 
+               mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
                return NULL;
        }