struct btree_iter *iter;
};
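+/* max size of a replicas_delta_list allocation; also the mempool element size */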
+#define REPLICAS_DELTA_LIST_MAX (1U << 16)
+
struct bch_fs {
struct closure cl;
struct bch_replicas_cpu replicas;
struct bch_replicas_cpu replicas_gc;
struct mutex replicas_gc_lock;
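+ /* fallback allocator for replicas_delta_list when kmalloc fails: */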
+ mempool_t replicas_delta_pool;
struct journal_entry_res btree_root_journal_res;
struct journal_entry_res replicas_journal_res;
bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
- kfree(trans->fs_usage_deltas);
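+ /*
+ * Only lists sized to the mempool element came from the mempool;
+ * everything else was kmalloc'd:
+ */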
+ if (trans->fs_usage_deltas) {
+ if (trans->fs_usage_deltas->size + sizeof(*trans->fs_usage_deltas) ==
+ REPLICAS_DELTA_LIST_MAX)
+ mempool_free(trans->fs_usage_deltas,
+ &trans->c->replicas_delta_pool);
+ else
+ kfree(trans->fs_usage_deltas);
+ }
if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
{
struct replicas_delta_list *d = trans->fs_usage_deltas;
unsigned new_size = d ? (d->size + more) * 2 : 128;
+ unsigned alloc_size = sizeof(*d) + new_size;
+
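+ /* growing past the mempool element size would leave no fallback: */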
+ WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
if (!d || d->used + more > d->size) {
- d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
- BUG_ON(!d);
+ d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
+
+ BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
+
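+ /*
+ * krealloc failed, but the list still fits in a mempool element -
+ * fall back to the pool, which is guaranteed to make progress:
+ */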
+ if (!d) {
+ d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
+ memset(d, 0, REPLICAS_DELTA_LIST_MAX);
+
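+ /* krealloc failure left the old list intact - carry it over: */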
+ if (trans->fs_usage_deltas)
+ memcpy(d, trans->fs_usage_deltas,
+ trans->fs_usage_deltas->size + sizeof(*d));
+
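+ /* mempool elements are fixed size; use the full capacity: */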
+ new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
+ kfree(trans->fs_usage_deltas);
+ }
d->size = new_size;
trans->fs_usage_deltas = d;
return ret;
}
+void bch2_fs_replicas_exit(struct bch_fs *c)
+{
+ unsigned i;
+
+ kfree(c->usage_scratch);
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ free_percpu(c->usage[i]);
+ kfree(c->usage_base);
+ kfree(c->replicas.entries);
+ kfree(c->replicas_gc.entries);
+
+ mempool_exit(&c->replicas_delta_pool);
+}
+
int bch2_fs_replicas_init(struct bch_fs *c)
{
bch2_journal_entry_res_resize(&c->journal,
&c->replicas_journal_res,
reserve_journal_replicas(c, &c->replicas));
- return replicas_table_update(c, &c->replicas);
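+ /*
+ * A single reserved element is enough: the pool is only used
+ * when kmalloc fails:
+ */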
+ return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
+ REPLICAS_DELTA_LIST_MAX) ?:
+ replicas_table_update(c, &c->replicas);
}
extern const struct bch_sb_field_ops bch_sb_field_ops_replicas;
extern const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0;
+void bch2_fs_replicas_exit(struct bch_fs *);
int bch2_fs_replicas_init(struct bch_fs *);
#endif /* _BCACHEFS_REPLICAS_H */
bch2_fs_btree_iter_exit(c);
bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
bch2_fs_btree_cache_exit(c);
+ bch2_fs_replicas_exit(c);
bch2_fs_journal_exit(&c->journal);
bch2_io_clock_exit(&c->io_clock[WRITE]);
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_journal_entries_free(&c->journal_entries);
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->online_reserved);
- kfree(c->usage_scratch);
- for (i = 0; i < ARRAY_SIZE(c->usage); i++)
- free_percpu(c->usage[i]);
- kfree(c->usage_base);
if (c->btree_iters_bufs)
for_each_possible_cpu(cpu)
bioset_exit(&c->btree_bio);
mempool_exit(&c->fill_iter);
percpu_ref_exit(&c->writes);
- kfree(c->replicas.entries);
- kfree(c->replicas_gc.entries);
kfree(rcu_dereference_protected(c->disk_groups, 1));
kfree(c->journal_seq_blacklist_table);
kfree(c->unused_inode_hints);