bcachefs: Split out btree_node_rewrite_worker
authorKent Overstreet <kent.overstreet@linux.dev>
Mon, 18 Mar 2024 00:25:39 +0000 (20:25 -0400)
committerKent Overstreet <kent.overstreet@linux.dev>
Mon, 18 Mar 2024 00:53:12 +0000 (20:53 -0400)
This fixes a deadlock caused by using btree_interior_update_worker for
non-interior updates: async btree node rewrites could block on that
workqueue, which in turn blocked other interior updates.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_update_interior.c

index 339dc3e1dcd39939b5f021db2665190ea07ceee9..799aa32b6b4d990f913b0d5dfb98b6a47af1f0b2 100644 (file)
@@ -849,6 +849,8 @@ struct bch_fs {
        struct workqueue_struct *btree_interior_update_worker;
        struct work_struct      btree_interior_update_work;
 
+       struct workqueue_struct *btree_node_rewrite_worker;
+
        struct list_head        pending_node_rewrites;
        struct mutex            pending_node_rewrites_lock;
 
index 642213ef9f798e477bc902e4977be9cb813aab56..5b2094458df7c73f575ab2ac11d2e32c88ed4e5f 100644 (file)
@@ -2161,7 +2161,7 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
                bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
        }
 
-       queue_work(c->btree_interior_update_worker, &a->work);
+       queue_work(c->btree_node_rewrite_worker, &a->work);
 }
 
 void bch2_do_pending_node_rewrites(struct bch_fs *c)
@@ -2173,7 +2173,7 @@ void bch2_do_pending_node_rewrites(struct bch_fs *c)
                list_del(&a->list);
 
                bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
-               queue_work(c->btree_interior_update_worker, &a->work);
+               queue_work(c->btree_node_rewrite_worker, &a->work);
        }
        mutex_unlock(&c->pending_node_rewrites_lock);
 }
@@ -2510,6 +2510,8 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c,
 
 void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
 {
+       if (c->btree_node_rewrite_worker)
+               destroy_workqueue(c->btree_node_rewrite_worker);
        if (c->btree_interior_update_worker)
                destroy_workqueue(c->btree_interior_update_worker);
        mempool_exit(&c->btree_interior_update_pool);
@@ -2534,6 +2536,11 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c)
        if (!c->btree_interior_update_worker)
                return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
 
+       c->btree_node_rewrite_worker =
+               alloc_ordered_workqueue("btree_node_rewrite", WQ_UNBOUND);
+       if (!c->btree_node_rewrite_worker)
+               return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
+
        if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
                                      sizeof(struct btree_update)))
                return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;