bcachefs: bch2_btree_node_lock_write_nofail()
authorKent Overstreet <kent.overstreet@linux.dev>
Tue, 23 Aug 2022 03:39:23 +0000 (23:39 -0400)
committerKent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:40 +0000 (17:09 -0400)
With the new cycle detector, taking a write lock will be able to fail -
unless we pass it nofail, which is possible but not preferred.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_leaf.c

index 38a66302d6e9f84bcb7d3fe06baa9df0aafac97d..94979b1a4912ce67c82ee73ec4e9dabba3e3fd24 100644 (file)
@@ -342,11 +342,12 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                }
        }
 
-       /*
-        * XXX: not allowed to be holding read locks when we take a write lock,
-        * currently
-        */
-       bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
+       ret = bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
+       if (ret) {
+               kfree(new_k);
+               goto err;
+       }
+
        if (new_k) {
                kfree(ck->k);
                ck->u64s = new_u64s;
index 32c28c1341e977af42bced5f76cda58c6b438459..6eaf44fd3f37b5acc0486acc8181d0c66677ec31 100644 (file)
@@ -281,7 +281,7 @@ static inline int btree_node_lock(struct btree_trans *trans,
 
 void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
 
-static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
+static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
                                              struct btree_path *path,
                                              struct btree *b)
 {
@@ -300,6 +300,15 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
                __bch2_btree_node_lock_write(trans, b);
 }
 
+static inline int __must_check
+bch2_btree_node_lock_write(struct btree_trans *trans,
+                          struct btree_path *path,
+                          struct btree *b)
+{
+       bch2_btree_node_lock_write_nofail(trans, path, b);
+       return 0;
+}
+
 /* relock: */
 
 bool bch2_btree_path_relock_norestart(struct btree_trans *,
index 6fe49766c6c8424c0a41f9f68e2f984171ed1fc5..db45883d27ceb92df9d2dd1e2ea8eeab2bf43582 100644 (file)
@@ -1163,7 +1163,7 @@ static void bch2_btree_set_root(struct btree_update *as,
         * Ensure no one is using the old root while we switch to the
         * new root:
         */
-       bch2_btree_node_lock_write(trans, path, old);
+       bch2_btree_node_lock_write_nofail(trans, path, old);
 
        bch2_btree_set_root_inmem(c, b);
 
@@ -2002,7 +2002,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
        if (ret)
                goto err;
 
-       bch2_btree_node_lock_write(trans, iter->path, b);
+       bch2_btree_node_lock_write_nofail(trans, iter->path, b);
 
        if (new_hash) {
                mutex_lock(&c->btree_cache.lock);
index d414cbefa3c950e115a043bf35c2478d67e937b3..3efec0b304669ad3ac1e910c9ff1d97ae1080cd3 100644 (file)
@@ -81,7 +81,7 @@ void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
                                     struct btree_path *path,
                                     struct btree *b)
 {
-       bch2_btree_node_lock_write(trans, path, b);
+       bch2_btree_node_lock_write_nofail(trans, path, b);
        bch2_btree_node_prep_for_write(trans, path, b);
 }