        x(prio_ptrs,            2)              \
        x(blacklist,            3)              \
        x(blacklist_v2,         4)              \
-       x(usage,                5)
+       x(usage,                5)              \
+       x(data_usage,           6)
 
 enum {
 #define x(f, nr)       BCH_JSET_ENTRY_##f      = nr,
        BCH_JSET_ENTRY_TYPES()
 #undef x
 };
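(For reference, a sketch of what the x-macro list expands to once
data_usage is added, assuming it is instantiated inside the enum via the
BCH_JSET_ENTRY_TYPES() pattern as above; entry types 0 and 1 lie outside
this excerpt:)

        enum {
                BCH_JSET_ENTRY_prio_ptrs        = 2,
                BCH_JSET_ENTRY_blacklist        = 3,
                BCH_JSET_ENTRY_blacklist_v2     = 4,
                BCH_JSET_ENTRY_usage            = 5,
                BCH_JSET_ENTRY_data_usage       = 6,
        };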
 
 enum {
-       FS_USAGE_REPLICAS               = 0,
+       FS_USAGE_RESERVED               = 0,
        FS_USAGE_INODES                 = 1,
        FS_USAGE_KEY_VERSION            = 2,
        FS_USAGE_NR                     = 3
 };
 
 struct jset_entry_usage {
        struct jset_entry       entry;
-       __le64                  sectors;
-       __u8                    type;
+       __le64                  v;
+} __attribute__((packed));
+
+struct jset_entry_data_usage {
+       struct jset_entry       entry;
+       __le64                  v;
        struct bch_replicas_entry r;
 } __attribute__((packed));
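(The split keeps jset_entry_usage at a fixed size and moves the
variable-length replicas payload into its own entry type; the generic
header fields btree_id and level are reused to carry the FS_USAGE_* tag
and the persistent_reserved index, as the replay switch further down
shows. A sketch of the resulting on-disk size of a data_usage entry;
the helper name here is hypothetical:)

        /*
         * Hypothetical helper, for illustration only: the total byte
         * size is the packed struct plus one byte per device index,
         * which is what the "bytes < sizeof(*u) + u->r.nr_devs"
         * validation below checks against.
         */
        static unsigned data_usage_entry_bytes(struct bch_replicas_entry *e)
        {
                return sizeof(struct jset_entry_data_usage) + e->nr_devs;
        }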
 
 
        nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
        usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
 
+       for (i = 0; i < BCH_REPLICAS_MAX; i++)
+               usage->s.reserved += usage->persistent_reserved[i];
+
        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
 
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;
 
+       if (journal_entry_err_on(bytes < sizeof(*u),
+                                c,
+                                "invalid journal entry usage: bad size")) {
+               journal_entry_null_range(entry, vstruct_next(entry));
+               return ret;
+       }
+
+fsck_err:
+       return ret;
+}
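(Here bytes is the entry's total on-disk size reconstructed from the
header: jset_u64s() is assumed to add the jset_entry header's own u64
on top of the payload count. Worked numbers, assuming the usual 8-byte
header:)

        /*
         * A minimal usage entry is written with
         *
         *      u64s  = DIV_ROUND_UP(sizeof(struct jset_entry_usage), 8) - 1 = 1
         *      bytes = jset_u64s(1) * sizeof(u64) = (1 + 1) * 8 = 16
         *
         * which is exactly sizeof(*u); anything smaller trips the check
         * and the range is nulled instead of failing recovery outright.
         */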
+
+static int journal_entry_validate_data_usage(struct bch_fs *c,
+                                       struct jset *jset,
+                                       struct jset_entry *entry,
+                                       int write)
+{
+       struct jset_entry_data_usage *u =
+               container_of(entry, struct jset_entry_data_usage, entry);
+       unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+       int ret = 0;
+
        if (journal_entry_err_on(bytes < sizeof(*u) ||
                                 bytes < sizeof(*u) + u->r.nr_devs,
                                 c,
 
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);
 
-               switch (u->type) {
-               case FS_USAGE_REPLICAS:
-                       ret = bch2_replicas_set_usage(c, &u->r,
-                                               le64_to_cpu(u->sectors));
+               switch (entry->btree_id) {
+               case FS_USAGE_RESERVED:
+                       if (entry->level < BCH_REPLICAS_MAX)
+                               percpu_u64_set(&c->usage[0]->
+                                              persistent_reserved[entry->level],
+                                              le64_to_cpu(u->v));
                        break;
                case FS_USAGE_INODES:
                        percpu_u64_set(&c->usage[0]->s.nr_inodes,
-                                               le64_to_cpu(u->sectors));
+                                      le64_to_cpu(u->v));
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
-                                    le64_to_cpu(u->sectors));
+                                    le64_to_cpu(u->v));
                        break;
                }
 
                break;
        }
+       case BCH_JSET_ENTRY_data_usage: {
+               struct jset_entry_data_usage *u =
+                       container_of(entry, struct jset_entry_data_usage, entry);
+               ret = bch2_replicas_set_usage(c, &u->r,
+                                             le64_to_cpu(u->v));
+               break;
+       }
        }
 
        return ret;

        list_for_each_entry(i, journal, list) {
                vstruct_for_each(&i->j, entry) {
                        if (entry->type == BCH_JSET_ENTRY_btree_root ||
-                           entry->type == BCH_JSET_ENTRY_usage)
+                           entry->type == BCH_JSET_ENTRY_usage ||
+                           entry->type == BCH_JSET_ENTRY_data_usage)
                                continue;
 
                        if (entry->type == BCH_JSET_ENTRY_btree_keys &&
 
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
 
+       /* persistent_reserved: */
+       journal_res_u64s +=
+               DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
+               BCH_REPLICAS_MAX;
+
        for_each_cpu_replicas_entry(r, e)
                journal_res_u64s +=
-                       DIV_ROUND_UP(sizeof(struct jset_entry_usage) +
+                       DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
                                     e->nr_devs, sizeof(u64));
        return journal_res_u64s;
 }
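(Worked reservation arithmetic, assuming an 8-byte jset_entry header,
so sizeof(struct jset_entry_usage) == 16 and, with the packed
attribute, sizeof(struct jset_entry_data_usage) == 18; and assuming
BCH_REPLICAS_MAX == 4 with a single replicas entry of nr_devs == 2:)

        /*
         *      usage entry:            DIV_ROUND_UP(16, 8)      = 2 u64s
         *      persistent_reserved:    DIV_ROUND_UP(16, 8) * 4  = 8 u64s
         *      data_usage entry:       DIV_ROUND_UP(18 + 2, 8)  = 3 u64s
         *
         * This is a worst case: reserved entries whose counter is zero
         * are skipped at write time and simply leave slack in the
         * journal reservation.
         */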
 
                                      struct jset_entry *entry,
                                      u64 journal_seq)
 {
-       struct jset_entry_usage *u;
        struct btree_root *r;
        unsigned i;
 
 
        {
                u64 nr_inodes = percpu_u64_get(&c->usage[0]->s.nr_inodes);
+               struct jset_entry_usage *u =
+                       container_of(entry, struct jset_entry_usage, entry);
 
-               u = container_of(entry, struct jset_entry_usage, entry);
                memset(u, 0, sizeof(*u));
                u->entry.u64s   = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
                u->entry.type   = BCH_JSET_ENTRY_usage;
-               u->sectors      = cpu_to_le64(nr_inodes);
-               u->type         = FS_USAGE_INODES;
+               u->entry.btree_id = FS_USAGE_INODES;
+               u->v            = cpu_to_le64(nr_inodes);
 
                entry = vstruct_next(entry);
        }
 
        {
-               u = container_of(entry, struct jset_entry_usage, entry);
+               struct jset_entry_usage *u =
+                       container_of(entry, struct jset_entry_usage, entry);
+
                memset(u, 0, sizeof(*u));
                u->entry.u64s   = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
                u->entry.type   = BCH_JSET_ENTRY_usage;
-               u->sectors      = cpu_to_le64(atomic64_read(&c->key_version));
-               u->type         = FS_USAGE_KEY_VERSION;
+               u->entry.btree_id = FS_USAGE_KEY_VERSION;
+               u->v            = cpu_to_le64(atomic64_read(&c->key_version));
+
+               entry = vstruct_next(entry);
+       }
+
+       for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+               struct jset_entry_usage *u =
+                       container_of(entry, struct jset_entry_usage, entry);
+               u64 sectors = percpu_u64_get(&c->usage[0]->persistent_reserved[i]);
+
+               if (!sectors)
+                       continue;
+
+               memset(u, 0, sizeof(*u));
+               u->entry.u64s   = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+               u->entry.type   = BCH_JSET_ENTRY_usage;
+               u->entry.btree_id = FS_USAGE_RESERVED;
+               u->entry.level  = i;
+               u->v            = cpu_to_le64(sectors);
 
                entry = vstruct_next(entry);
        }

                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                u64 sectors = percpu_u64_get(&c->usage[0]->data[i]);
+               struct jset_entry_data_usage *u =
+                       container_of(entry, struct jset_entry_data_usage, entry);
 
-               u = container_of(entry, struct jset_entry_usage, entry);
+               memset(u, 0, sizeof(*u));
                u->entry.u64s   = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
                                               sizeof(u64)) - 1;
-               u->entry.type   = BCH_JSET_ENTRY_usage;
-               u->sectors      = cpu_to_le64(sectors);
-               u->type         = FS_USAGE_REPLICAS;
+               u->entry.type   = BCH_JSET_ENTRY_data_usage;
+               u->v            = cpu_to_le64(sectors);
                unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
                              "embedded variable length struct");