}
}
+void bch2_fs_usage_initialize(struct bch_fs *c)
+{
+	struct bch_fs_usage *usage;
+	unsigned i, nr;
+
+	percpu_down_write(&c->mark_lock);
+	/* fold every percpu copy of the usage counters into one: */
+	nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
+	usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+
+	/* derive the summary totals from the per-replicas breakdown: */
+	for (i = 0; i < c->replicas.nr; i++) {
+		struct bch_replicas_entry *e =
+			cpu_replicas_entry(&c->replicas, i);
+
+		switch (e->data_type) {
+		case BCH_DATA_BTREE:
+		case BCH_DATA_USER:
+			usage->s.data += usage->data[i];
+			break;
+		case BCH_DATA_CACHED:
+			usage->s.cached += usage->data[i];
+			break;
+		}
+	}
+
+	percpu_up_write(&c->mark_lock);
+}
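
The accumulation step above relies on folding every CPU's copy of the counter array into a single array before the totals are derived. Below is a minimal user-space sketch of that pattern; the fixed NR_CPUS, NR_COUNTERS, and the acc_u64s() helper are invented stand-ins for bch2_acc_percpu_u64s(), which operates on real percpu allocations and is called here under c->mark_lock.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		4
#define NR_COUNTERS	3

/* sum every per-CPU copy of a u64 array into the first copy: */
static uint64_t *acc_u64s(uint64_t vals[NR_CPUS][NR_COUNTERS], unsigned nr)
{
	unsigned cpu, i;

	for (cpu = 1; cpu < NR_CPUS; cpu++)
		for (i = 0; i < nr; i++)
			vals[0][i] += vals[cpu][i];

	return vals[0];
}

int main(void)
{
	/* each "CPU" has its own copy of the counters: */
	uint64_t counters[NR_CPUS][NR_COUNTERS] = {
		{ 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 },
	};
	uint64_t *sum = acc_u64s(counters, NR_COUNTERS);
	unsigned i;

	for (i = 0; i < NR_COUNTERS; i++)
		printf("counter %u = %" PRIu64 "\n", i, sum[i]);

	return 0;
}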
+
#define bch2_usage_read_raw(_stats)				\
({								\
	typeof(*this_cpu_ptr(_stats)) _acc;			\
/* key/bucket marking: */
void bch2_bucket_seq_cleanup(struct bch_fs *);
+void bch2_fs_usage_initialize(struct bch_fs *);
void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, struct bucket_mark *);
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
+#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
}
}
+	bch2_fs_usage_initialize(c);
+
 	for (i = 0; i < BTREE_ID_NR; i++) {
 		struct btree_root *r = &c->btree_roots[i];
 	bch_notice(c, "initializing new filesystem");
+	mutex_lock(&c->sb_lock);
+	for_each_online_member(ca, c, i)
+		bch2_mark_dev_superblock(c, ca, 0);
+	mutex_unlock(&c->sb_lock);
+
 	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
 	for (i = 0; i < BTREE_ID_NR; i++)
 	if (ret)
 		return ret;
-	mutex_lock(&c->sb_lock);
-	bch2_mark_dev_superblock(ca->fs, ca, 0);
-	mutex_unlock(&c->sb_lock);
+	/*
+	 * Only re-mark the superblock buckets if allocation info has been
+	 * read and they haven't already been accounted for:
+	 */
+	if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
+	    !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_SB])) {
+		mutex_lock(&c->sb_lock);
+		bch2_mark_dev_superblock(ca->fs, ca, 0);
+		mutex_unlock(&c->sb_lock);
+	}
 	bch2_dev_sysfs_online(c, ca);
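
The guard added above reads an accumulated percpu counter to decide whether the superblock buckets have already been counted. A rough user-space analogue of that read side follows, again assuming an invented fixed NR_CPUS; in the real code, percpu_u64_get() sums a percpu u64 across all possible CPUs.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS	4

/* stand-in for percpu_u64_get(): total one counter across all CPUs */
static uint64_t percpu_u64_get_sketch(const uint64_t *vals)
{
	uint64_t sum = 0;
	unsigned cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += vals[cpu];

	return sum;
}

int main(void)
{
	uint64_t sb_buckets[NR_CPUS] = { 0, 0, 0, 0 };

	/* a zero total means the buckets were never accounted for: */
	if (!percpu_u64_get_sketch(sb_buckets))
		printf("marking superblock buckets\n");

	return 0;
}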