	struct btree_path_buf __percpu *btree_paths_bufs;
	struct srcu_struct btree_trans_barrier;
+	bool btree_trans_barrier_initialized;
	struct btree_key_cache btree_key_cache;

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
+	if (c->btree_trans_barrier_initialized)
+		cleanup_srcu_struct(&c->btree_trans_barrier);
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
-	cleanup_srcu_struct(&c->btree_trans_barrier);
}
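
The new btree_trans_barrier_initialized flag records whether init_srcu_struct() actually completed, so the exit path only tears down an SRCU barrier that was really set up; the old unconditional cleanup_srcu_struct() call at the end of the function goes away. As a minimal, self-contained sketch of the same guarded-teardown idiom (plain userspace C, not bcachefs code; the struct and function names are made up for illustration), using a pthread mutex whose destroy call is likewise only valid after a successful init:

#include <pthread.h>
#include <stdbool.h>

struct ctx {
	pthread_mutex_t	lock;
	bool		lock_initialized;
};

static int ctx_init(struct ctx *c)
{
	int ret = pthread_mutex_init(&c->lock, NULL);

	if (!ret)
		c->lock_initialized = true;
	return ret;
}

static void ctx_exit(struct ctx *c)
{
	/* destroying a mutex that was never initialized is undefined,
	 * so only tear down what ctx_init() actually completed */
	if (c->lock_initialized)
		pthread_mutex_destroy(&c->lock);
}

The init side below applies the same rule: the flag is only set once every step, ending with init_srcu_struct(), has returned 0.
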
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;
+	int ret;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

-	return init_srcu_struct(&c->btree_trans_barrier) ?:
-		mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
+	ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
			sizeof(struct btree_path) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
-					  BTREE_TRANS_MEM_MAX);
+					  BTREE_TRANS_MEM_MAX) ?:
+		init_srcu_struct(&c->btree_trans_barrier);
+	if (!ret)
+		c->btree_trans_barrier_initialized = true;
+	return ret;
}
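
On the init side, the three initializers are now chained with the kernel's `?:` ("Elvis") shorthand: each step runs only if the previous one returned 0, and the first nonzero error code is what ends up in ret. Because init_srcu_struct() moves to the end of the chain, ret == 0 implies the SRCU barrier really was initialized, which is exactly when the flag is set; on any earlier failure the barrier is never touched, the flag stays false, and bch2_fs_btree_iter_exit() remains safe to call on the error path. A standalone toy example of the short-circuit behaviour (plain C using the GNU `?:` extension; the step names are invented, and -12 merely stands in for -ENOMEM):

#include <stdio.h>

static int step(const char *name, int err)
{
	printf("%s -> %d\n", name, err);
	return err;
}

int main(void)
{
	int ret = step("paths_pool", 0) ?:	/* runs, succeeds */
		  step("trans_mem_pool", -12) ?:	/* runs, fails */
		  step("srcu_barrier", 0);	/* skipped entirely */

	printf("ret = %d\n", ret);		/* prints ret = -12 */
	return 0;
}

The remaining hunk, which appears to be from the btree key cache teardown path, applies the same defensive treatment to the cached-key hash table:
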
	rcu_read_lock();
	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
-	for (i = 0; i < tbl->size; i++)
-		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
-			bkey_cached_evict(bc, ck);
-			list_add(&ck->list, &bc->freed);
-		}
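+	/* tbl is NULL if the hash table was never initialized */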
+	if (tbl)
+		for (i = 0; i < tbl->size; i++)
+			rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+				bkey_cached_evict(bc, ck);
+				list_add(&ck->list, &bc->freed);
+			}
	rcu_read_unlock();

	list_for_each_entry_safe(ck, n, &bc->freed, list) {