 		}
 	}
-	/*
-	 * XXX: not allowed to be holding read locks when we take a write lock,
-	 * currently
-	 */
-	bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
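+	/*
+	 * The write lock is now fallible: free any newly allocated key
+	 * before taking the common error path.
+	 */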
+	ret = bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
+	if (ret) {
+		kfree(new_k);
+		goto err;
+	}
+
 	if (new_k) {
 		kfree(ck->k);
 		ck->u64s = new_u64s;
 void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
-static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
+static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 					      struct btree_path *path,
 					      struct btree *b)
 {
 	__bch2_btree_node_lock_write(trans, b);
 }
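+/*
+ * Fallible variant: for now it always succeeds, but it returns an int and
+ * is __must_check so that callers are already forced to handle failure.
+ */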
+static inline int __must_check
+bch2_btree_node_lock_write(struct btree_trans *trans,
+			   struct btree_path *path,
+			   struct btree *b)
+{
+	bch2_btree_node_lock_write_nofail(trans, path, b);
+	return 0;
+}
+
 /* relock: */
 bool bch2_btree_path_relock_norestart(struct btree_trans *,
 	 * Ensure no one is using the old root while we switch to the
 	 * new root:
 	 */
-	bch2_btree_node_lock_write(trans, path, old);
+	bch2_btree_node_lock_write_nofail(trans, path, old);
 	bch2_btree_set_root_inmem(c, b);
 	if (ret)
 		goto err;
-	bch2_btree_node_lock_write(trans, iter->path, b);
+	bch2_btree_node_lock_write_nofail(trans, iter->path, b);
 	if (new_hash) {
 		mutex_lock(&c->btree_cache.lock);
 				     struct btree_path *path,
 				     struct btree *b)
 {
-	bch2_btree_node_lock_write(trans, path, b);
+	bch2_btree_node_lock_write_nofail(trans, path, b);
 	bch2_btree_node_prep_for_write(trans, path, b);
 }