struct bch_replicas_cpu replicas_gc;
struct mutex replicas_gc_lock;
+ struct journal_entry_res btree_root_journal_res;
struct journal_entry_res replicas_journal_res;
-
+ struct journal_entry_res clock_journal_res;
struct journal_entry_res dev_usage_journal_res;
struct bch_disk_groups_cpu __rcu *disk_groups;
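For context: each of these fields is a standing reservation of space in every
journal entry, kept around so the reservation can be resized when its
consumer's needs change. A minimal sketch of the type, assuming it only needs
to remember the currently reserved size (the real definition lives in
journal_types.h):

	struct journal_entry_res {
		unsigned	u64s;	/* currently reserved, in u64s */
	};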
j->write_delay_ms = 1000;
j->reclaim_delay_ms = 100;
- /* Btree roots: */
- j->entry_u64s_reserved +=
- BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX);
-
- j->entry_u64s_reserved +=
- 2 * (sizeof(struct jset_entry_clock) / sizeof(u64));
-
atomic64_set(&j->reservations.counter,
((union journal_res_state)
{ .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
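The hardcoded reservations deleted above become resizable instead: consumers
now call bch2_journal_entry_res_resize(), which applies only the delta between
the old and new size, so repeated calls don't stack. A simplified sketch of
those semantics (assumption: the real helper must also shrink the currently
open journal entry when a reservation grows; that handling is elided here):

	void bch2_journal_entry_res_resize(struct journal *j,
					   struct journal_entry_res *res,
					   unsigned new_u64s)
	{
		spin_lock(&j->lock);
		/* apply the delta between old and new reservation: */
		j->entry_u64s_reserved += (int) new_u64s - (int) res->u64s;
		res->u64s = new_u64s;
		spin_unlock(&j->lock);
	}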
int bch2_fs_replicas_init(struct bch_fs *c)
{
- c->journal.entry_u64s_reserved +=
- reserve_journal_replicas(c, &c->replicas);
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->replicas_journal_res,
+ reserve_journal_replicas(c, &c->replicas));
return replicas_table_update(c, &c->replicas);
}
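This is why going through a tracked res beats the old bare `+=`: the replicas
table can grow after init (e.g. when a device is added), and a later call
along these lines (illustrative; new_r stands for the grown table)

	bch2_journal_entry_res_resize(&c->journal,
				      &c->replicas_journal_res,
				      reserve_journal_replicas(c, &new_r));

replaces the previous reservation rather than stacking a second one on top of
it.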
{
struct bch_dev *ca;
unsigned i, nr = 0, u64s =
- (sizeof(struct jset_entry_dev_usage) +
- sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR);
+ (sizeof(struct jset_entry_dev_usage) +
+ sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR) /
+ sizeof(u64);
rcu_read_lock();
for_each_member_device_rcu(ca, c, i, NULL)
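The change above is a units fix: the reservation is counted in u64s, but the
old expression computed a size in bytes, over-reserving by a factor of
sizeof(u64). The rest of the function counts member devices and scales the
reservation accordingly; assuming the tail matches upstream, it ends roughly
as:

		nr++;
	rcu_read_unlock();

	bch2_journal_entry_res_resize(&c->journal,
				      &c->dev_usage_journal_res,
				      u64s * nr);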
bch2_fs_fsio_init(c))
goto err;
- bch2_dev_usage_journal_reserve(c);
-
mi = bch2_sb_get_members(c->disk_sb.sb);
for (i = 0; i < c->sb.nr_devices; i++)
if (bch2_dev_exists(c->disk_sb.sb, mi, i) &&
bch2_dev_alloc(c, i))
goto err;
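+ /* Btree roots: */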
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->btree_root_journal_res,
+ BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
+ bch2_dev_usage_journal_reserve(c);
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->clock_journal_res,
+ (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
+
mutex_lock(&bch_fs_list_lock);
err = bch2_fs_online(c);
mutex_unlock(&bch_fs_list_lock);
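Ordering note: bch2_dev_usage_journal_reserve() moves below the
bch2_dev_alloc() loop because it sizes its reservation by walking the member
devices; in its old position it would have run before any bch_dev was
allocated and reserved nothing. After these calls, the journal's total
standing reservation covers roughly (illustrative; the replicas res is sized
separately in bch2_fs_replicas_init()):

	/*
	 * entry_u64s_reserved >=
	 *	BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX) +
	 *	nr_member_devices * dev_usage_u64s +
	 *	2 * (sizeof(struct jset_entry_clock) / sizeof(u64))
	 */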