if (size > trans->mem_bytes) {
size_t old_bytes = trans->mem_bytes;
size_t new_bytes = roundup_pow_of_two(size);
- void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+ void *new_mem;
+
+ WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+
+ new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+ if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+ new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+ new_bytes = BTREE_TRANS_MEM_MAX;
+ kfree(trans->mem);
+ }
if (!new_mem)
return -ENOMEM;
if (expected_mem_bytes) {
expected_mem_bytes = roundup_pow_of_two(expected_mem_bytes);
trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
- if (trans->mem)
+
+ if (!unlikely(trans->mem)) {
+ trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
+ trans->mem_bytes = BTREE_TRANS_MEM_MAX;
+ } else {
trans->mem_bytes = expected_mem_bytes;
+ }
}
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
kfree(trans->fs_usage_deltas);
- kfree(trans->mem);
+
+ if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+ mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
+ else
+ kfree(trans->mem);
#ifdef __KERNEL__
/*
*/
trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
#endif
+
if (trans->iters)
mempool_free(trans->iters, &trans->c->btree_iters_pool);
/*
 * Filesystem-shutdown teardown for the btree iterator machinery:
 * releases the emergency mempools and the SRCU barrier used by
 * btree_trans.  Order mirrors reverse of init: newest resource
 * (btree_trans_mem_pool, added by this patch's '+' line) first.
 * NOTE(review): this span is a diff hunk, not plain C — the '+'
 * prefix below marks the line added by the patch.
 */
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
+ mempool_exit(&c->btree_trans_mem_pool);
mempool_exit(&c->btree_iters_pool);
cleanup_srcu_struct(&c->btree_trans_barrier);
}
mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
sizeof(struct btree_iter) * nr +
sizeof(struct btree_insert_entry) * nr +
- sizeof(struct btree_insert_entry) * nr);
+ sizeof(struct btree_insert_entry) * nr) ?:
+ mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
+ BTREE_TRANS_MEM_MAX);
}