__le64 bi_hash_seed;
__le32 bi_flags;
__le16 bi_mode;
- __u8 fields[0];
+ __u8 fields[];
} __packed __aligned(8);
struct bch_inode_v2 {
__le64 bi_hash_seed;
__le64 bi_flags;
__le16 bi_mode;
- __u8 fields[0];
+ __u8 fields[];
} __packed __aligned(8);
struct bch_inode_v3 {
__le64 bi_sectors;
__le64 bi_size;
__le64 bi_version;
- __u8 fields[0];
+ __u8 fields[];
} __packed __aligned(8);
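
The pattern running through this whole series: the GCC zero-length-array idiom `fields[0]` becomes a C99 flexible array member `fields[]`. On-disk layout is unchanged (both contribute zero size), but with -fstrict-flex-arrays and FORTIFY_SOURCE only a true `[]` member is recognized as variable-length storage, so indexing past a `[0]` array now draws -Warray-bounds warnings. A minimal userspace sketch of the allocation pattern; demo_inode is an illustrative stand-in, not the real struct:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* stand-in for a variable-length on-disk struct */
	struct demo_inode {
		unsigned long long	bi_hash_seed;
		unsigned int		bi_flags;
		unsigned short		bi_mode;
		unsigned char		fields[];	/* flexible array member */
	};

	int main(void)
	{
		size_t nr_field_bytes = 24;
		/* sizeof(*inode) covers only the fixed header: the flexible
		 * array contributes no size, so payload space is added by hand */
		struct demo_inode *inode = malloc(sizeof(*inode) + nr_field_bytes);

		if (!inode)
			return 1;
		memset(inode->fields, 0, nr_field_bytes);
		printf("header %zu bytes, payload %zu bytes\n",
		       sizeof(*inode), nr_field_bytes);
		free(inode);
		return 0;
	}
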
#define INODEv3_FIELDS_START_INITIAL 6
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
- __u64 _data[0];
+ __u64 _data[];
} __packed __aligned(8);
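
Here two members name the same trailing storage: start is the typed view of the payload, _data the raw u64 view used for length arithmetic. C allows at most one flexible array member per struct and it must come last, so only _data becomes flexible while the typed alias stays a zero-length array. A compile-and-run sketch of the pairing, with stand-in types (the real ones are union bch_extent_entry and struct bch_reflink_v):

	#include <stddef.h>
	#include <stdio.h>

	struct demo_entry {
		unsigned long long	word;
	};

	struct demo_val {
		unsigned long long	refcount;
		struct demo_entry	start[0];	/* typed view, zero-sized */
		unsigned long long	_data[];	/* raw view, sized in u64s */
	};

	int main(void)
	{
		/* both views name the same trailing offset */
		printf("start @%zu, _data @%zu\n",
		       offsetof(struct demo_val, start),
		       offsetof(struct demo_val, _data));
		return 0;
	}
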
struct bch_indirect_inline_data {
struct bch_val v;
__le64 refcount;
- u8 data[0];
+ u8 data[];
};
/* Inline data */
struct bch_inline_data {
struct bch_val v;
- u8 data[0];
+ u8 data[];
};
/* Subvolumes: */
struct bch_sb_field_journal {
struct bch_sb_field field;
- __le64 buckets[0];
+ __le64 buckets[];
};
struct bch_sb_field_journal_v2 {
struct bch_sb_field_journal_v2_entry {
__le64 start;
__le64 nr;
- } d[0];
+ } d[];
};
/* BCH_SB_FIELD_members: */
struct bch_sb_field_members {
struct bch_sb_field field;
- struct bch_member members[0];
+ struct bch_member members[];
};
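
Each superblock field is a fixed bch_sb_field header followed by a now-flexible payload; entry counts are not stored, they are derived from the field's size. A hedged userspace sketch of that derivation with demo_* stand-ins (the real header records its size in u64s, not bytes):

	#include <stddef.h>
	#include <stdio.h>

	struct demo_field {
		unsigned int	bytes;	/* total size: header plus payload */
		unsigned int	type;
	};

	struct demo_member {
		unsigned long long	uuid[2];
	};

	struct demo_members {
		struct demo_field	field;
		struct demo_member	members[];
	};

	/* the entry count falls out of the field size */
	static size_t demo_nr_members(const struct demo_members *mi)
	{
		return (mi->field.bytes - sizeof(*mi)) / sizeof(mi->members[0]);
	}

	int main(void)
	{
		struct demo_members m = {
			.field.bytes = sizeof(struct demo_members)
				     + 3 * sizeof(struct demo_member),
		};

		printf("%zu members\n", demo_nr_members(&m));	/* 3 */
		return 0;
	}
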
/* BCH_SB_FIELD_crypt: */
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[0];
+ __u8 devs[];
} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[0];
+ struct bch_replicas_entry_v0 entries[];
} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[0];
+ __u8 devs[];
} __packed;
#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
struct bch_sb_field_replicas {
struct bch_sb_field field;
- struct bch_replicas_entry entries[0];
+ struct bch_replicas_entry entries[];
} __packed __aligned(8);
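
Because each replicas entry carries nr_devs trailing device indices, entries[] cannot be indexed directly; the list is walked by advancing each entry by its own byte size, which is exactly what replicas_entry_bytes() computes. A userspace sketch of the walk, demo_* names invented here:

	#include <stddef.h>
	#include <stdio.h>

	struct demo_replicas_entry {
		unsigned char	data_type;
		unsigned char	nr_devs;
		unsigned char	nr_required;
		unsigned char	devs[];
	};

	/* same shape as replicas_entry_bytes(): header up to devs[],
	 * plus one byte per device */
	#define demo_entry_bytes(_i)					\
		(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)

	int main(void)
	{
		unsigned char buf[] = {
			1, 2, 1, 10, 11,	/* entry 0: 2 devs */
			2, 1, 1, 12,		/* entry 1: 1 dev */
		};
		struct demo_replicas_entry *e = (void *) buf;
		unsigned char *end = buf + sizeof(buf);

		while ((unsigned char *) e < end) {
			printf("type %u on %u devs\n", e->data_type, e->nr_devs);
			e = (void *) ((unsigned char *) e + demo_entry_bytes(e));
		}
		return 0;
	}
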
/* BCH_SB_FIELD_quota: */
struct bch_sb_field_disk_groups {
struct bch_sb_field field;
- struct bch_disk_group entries[0];
+ struct bch_disk_group entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_counters */
struct bch_sb_field_counters {
struct bch_sb_field field;
- __le64 d[0];
+ __le64 d[];
};
__u8 type; /* designates what this jset holds */
__u8 pad[3];
- union {
- struct bkey_i start[0];
- __u64 _data[0];
- };
+ struct bkey_i start[0];
+ __u64 _data[];
};
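
The anonymous unions that paired a typed start[0] with a raw _data[0] are flattened, because a union member may not be a flexible array. Dropping the union and leaving start as a zero-length array preserves the layout: every member involved is zero-sized, so both views still begin at the same offset. A compile-time check of that claim on stand-in types:

	#include <assert.h>
	#include <stddef.h>

	struct demo_jset_entry {
		unsigned short	u64s;
		unsigned char	type;
		unsigned char	pad[3];

		struct { unsigned long long k; }	start[0];
		unsigned long long			_data[];
	};

	/* both views still begin at the same offset once the union is gone */
	static_assert(offsetof(struct demo_jset_entry, start) ==
		      offsetof(struct demo_jset_entry, _data),
		      "views must alias");

	int main(void) { return 0; }
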
struct bch_sb_field_clean {
__le16 _write_clock;
__le64 journal_seq;
- union {
- struct jset_entry start[0];
- __u64 _data[0];
- };
+ struct jset_entry start[0];
+ __u64 _data[];
};
struct journal_seq_blacklist_entry {
struct bch_sb_field_journal_seq_blacklist {
struct bch_sb_field field;
- union {
- struct journal_seq_blacklist_entry start[0];
- __u64 _data[0];
- };
+ struct journal_seq_blacklist_entry start[0];
+ __u64 _data[];
};
/* Superblock: */
struct bch_sb_layout layout;
- union {
- struct bch_sb_field start[0];
- __le64 _data[0];
- };
+ struct bch_sb_field start[0];
+ __le64 _data[];
} __packed __aligned(8);
__le64 last_seq;
- union {
- struct jset_entry start[0];
- __u64 _data[0];
- };
+ struct jset_entry start[0];
+ __u64 _data[];
} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
__le16 version;
__le16 u64s; /* count of d[] in u64s */
- union {
- struct bkey_packed start[0];
- __u64 _data[0];
- };
+ struct bkey_packed start[0];
+ __u64 _data[];
} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
struct bkey_packed *k)
{
EBUG_ON(state->p < k->_data);
- EBUG_ON(state->p >= k->_data + state->format->key_u64s);
+ EBUG_ON(state->p >= (u64 *) k->_data + state->format->key_u64s);
*state->p = state->w;
}
static inline struct bkey_i *bkey_next(struct bkey_i *k)
{
- return (struct bkey_i *) (k->_data + k->k.u64s);
+ return (struct bkey_i *) ((u64 *) k->_data + k->k.u64s);
}
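
The `(u64 *)` casts look redundant, since the `_data` arrays already decay to `__u64 *`. But in bkey_packed and bkey_i, `_data[0]` sits at the *front* of the struct as an overlay, where it cannot become a flexible array member; presumably, under -fstrict-flex-arrays the compiler treats the decayed `[0]` array as genuinely zero-sized, so arithmetic past it draws -Warray-bounds, and the cast to a plain `u64 *` takes the array type out of the compiler's view. A stand-in sketch of the same stepping arithmetic:

	#include <stdio.h>

	/* demo_key stands in for bkey_i; _data[0] overlays the whole key */
	struct demo_key {
		unsigned long long	_data[0];

		unsigned char		u64s;	/* total key size, in u64s */
		unsigned char		pad[7];
	};

	static struct demo_key *demo_key_next(struct demo_key *k)
	{
		/* cast first so the compiler sees plain pointer arithmetic,
		 * not indexing past a zero-sized array */
		return (struct demo_key *)
			((unsigned long long *) k->_data + k->u64s);
	}

	int main(void)
	{
		unsigned long long buf[3] = { 0 };
		struct demo_key *k = (struct demo_key *) buf;

		k->u64s = 1;			/* first key: one u64 */
		demo_key_next(k)->u64s = 2;	/* second key starts at word 1 */
		printf("second key at word %td\n",
		       (unsigned long long *) demo_key_next(k) - buf);
		return 0;
	}
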
#define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s)
}
#define bkeyp_val(_format, _k) \
- ((struct bch_val *) ((_k)->_data + bkeyp_key_u64s(_format, _k)))
+ ((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))
extern const struct bkey_format bch2_bkey_format_current;
#error edit for your odd byteorder.
#endif
-#define high_word(f, k) ((k)->_data + high_word_offset(f))
+#define high_word(f, k) ((u64 *) (k)->_data + high_word_offset(f))
#define next_word(p) nth_word(p, 1)
#define prev_word(p) nth_word(p, -1)
struct sort_iter_set {
struct bkey_packed *k, *end;
- } data[MAX_BSETS + 1];
+ } data[];
};
-static inline void sort_iter_init(struct sort_iter *iter, struct btree *b)
+static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size)
{
iter->b = b;
iter->used = 0;
- iter->size = ARRAY_SIZE(iter->data);
+ iter->size = size;
+}
+
+struct sort_iter_stack {
+ struct sort_iter iter;
+ struct sort_iter_set sets[MAX_BSETS + 1];
+};
+
+static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b)
+{
+ sort_iter_init(&iter->iter, b, ARRAY_SIZE(iter->sets));
}
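
With data[] flexible, a bare struct sort_iter no longer reserves any sets of its own, so the common on-stack case gets a wrapper: sort_iter_stack appends MAX_BSETS + 1 sets directly after the header, and callers pass &s.iter. A sketch of the wrapper trick with stand-in types:

	#include <stdio.h>

	struct demo_set { int v; };

	struct demo_iter {
		unsigned		used;
		unsigned		size;
		struct demo_set		data[];	/* flexible: no storage on its own */
	};

	/* fixed-capacity wrapper for stack use, mirroring sort_iter_stack:
	 * sets[] lands exactly where data[] begins, giving the header
	 * backing storage (GCC tolerates the embedded FAM here) */
	struct demo_iter_stack {
		struct demo_iter	iter;
		struct demo_set		sets[4];
	};

	#define ARRAY_SIZE(_a)	(sizeof(_a) / sizeof((_a)[0]))

	int main(void)
	{
		struct demo_iter_stack s = {
			.iter.size = ARRAY_SIZE(s.sets),
		};

		s.iter.data[s.iter.used++] = (struct demo_set) { .v = 42 };
		printf("%d, used %u of %u\n",
		       s.iter.data[0].v, s.iter.used, s.iter.size);
		return 0;
	}

The dynamically sized path in btree_io below does the same arithmetic by hand: the mempool element is sized for the worst case and the capacity is passed straight into sort_iter_init().
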
static inline void sort_iter_add(struct sort_iter *iter,
{
struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
- struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
+ struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
#if 0
}
struct ro_aux_tree {
- struct bkey_float f[0];
+ u8 nothing[0];
+ struct bkey_float f[];
};
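
ro_aux_tree picks up a placeholder member because a struct may not consist solely of a flexible array member ("flexible array member in a struct with no named members"); a zero-length array keeps the definition legal without adding size. Sketch:

	#include <stdio.h>

	struct demo_ro_aux_tree {
		unsigned char	nothing[0];	/* keeps the struct non-empty */
		struct {
			unsigned int	mantissa;
		}		f[];
	};

	int main(void)
	{
		printf("sizeof: %zu\n",
		       sizeof(struct demo_ro_aux_tree));	/* 0 with GNU C */
		return 0;
	}
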
struct rw_aux_tree {
{
unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
- return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
+ return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
}
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
btree_keys_account_key_add(&b->nr, t - b->set, src);
if (src->u64s != clobber_u64s) {
- u64 *src_p = where->_data + clobber_u64s;
- u64 *dst_p = where->_data + src->u64s;
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
+ u64 *dst_p = (u64 *) where->_data + src->u64s;
EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
(int) clobber_u64s - src->u64s);
unsigned clobber_u64s)
{
struct bset_tree *t = bset_tree_last(b);
- u64 *src_p = where->_data + clobber_u64s;
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
u64 *dst_p = where->_data;
bch2_bset_verify_rw_aux_tree(b, t);
bool filter_whiteouts)
{
struct btree_node *out;
- struct sort_iter sort_iter;
+ struct sort_iter_stack sort_iter;
struct bset_tree *t;
struct bset *start_bset = bset(b, &b->set[start_idx]);
bool used_mempool = false;
bool sorting_entire_node = start_idx == 0 &&
end_idx == b->nsets;
- sort_iter_init(&sort_iter, b);
+ sort_iter_stack_init(&sort_iter, b);
for (t = b->set + start_idx;
t < b->set + end_idx;
t++) {
u64s += le16_to_cpu(bset(b, t)->u64s);
- sort_iter_add(&sort_iter,
+ sort_iter_add(&sort_iter.iter,
btree_bkey_first(b, t),
btree_bkey_last(b, t));
}
start_time = local_clock();
- u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
+ u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
out->keys.u64s = cpu_to_le16(u64s);
b->written = 0;
iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
- sort_iter_init(iter, b);
- iter->size = (btree_blocks(c) + 1) * 2;
+ sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
if (bch2_meta_read_fault("btree"))
btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
struct bset *i;
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
- struct sort_iter sort_iter;
+ struct sort_iter_stack sort_iter;
struct nonce nonce;
unsigned bytes_to_write, sectors_to_write, bytes, u64s;
u64 seq = 0;
bch2_sort_whiteouts(c, b);
- sort_iter_init(&sort_iter, b);
+ sort_iter_stack_init(&sort_iter, b);
bytes = !b->written
? sizeof(struct btree_node)
continue;
bytes += le16_to_cpu(i->u64s) * sizeof(u64);
- sort_iter_add(&sort_iter,
+ sort_iter_add(&sort_iter.iter,
btree_bkey_first(b, t),
btree_bkey_last(b, t));
seq = max(seq, le64_to_cpu(i->journal_seq));
i->journal_seq = cpu_to_le64(seq);
i->u64s = 0;
- sort_iter_add(&sort_iter,
+ sort_iter_add(&sort_iter.iter,
unwritten_whiteouts_start(c, b),
unwritten_whiteouts_end(c, b));
SET_BSET_SEPARATE_WHITEOUTS(i, false);
b->whiteout_u64s = 0;
- u64s = bch2_sort_keys(i->start, &sort_iter, false);
+ u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
le16_add_cpu(&i->u64s, u64s);
BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
(round_up(vstruct_bytes(_s), 512 << (_sector_block_bits)) >> 9)
#define vstruct_next(_s) \
- ((typeof(_s)) ((_s)->_data + __vstruct_u64s(_s)))
+ ((typeof(_s)) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
#define vstruct_last(_s) \
- ((typeof(&(_s)->start[0])) ((_s)->_data + __vstruct_u64s(_s)))
+ ((typeof(&(_s)->start[0])) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
#define vstruct_end(_s) \
- ((void *) ((_s)->_data + __vstruct_u64s(_s)))
+ ((void *) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
#define vstruct_for_each(_s, _i)					\
	for (_i = (_s)->start;						\
	     _i < vstruct_last(_s);					\
	     _i = vstruct_next(_i))
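
The vstruct helpers generalize the same arithmetic: anything carrying its payload length as a u64 count can be advanced, bounded, and iterated, with the casts routed through a plain `u64 *` for the same strict-flex-array reason as in bkey_next() above. A self-contained sketch of the idea; the demo_* names are invented here:

	#include <stdio.h>

	/* demo vstruct: raw overlay up front, length in u64s, u64 payload */
	struct demo_vstruct {
		unsigned long long	_data[0];

		unsigned short		u64s;	/* payload length in u64s */
		unsigned short		pad[3];
		unsigned long long	start[];
	};

	#define demo_vstruct_end(_s)					\
		((void *) ((unsigned long long *) (_s)->_data +	\
			   sizeof(*(_s)) / sizeof(unsigned long long) +	\
			   (_s)->u64s))

	int main(void)
	{
		unsigned long long buf[4] = { 0 };
		struct demo_vstruct *s = (struct demo_vstruct *) buf;
		unsigned long long *i;

		s->u64s = 3;
		for (i = s->start;
		     i < (unsigned long long *) demo_vstruct_end(s);
		     i++)
			*i = i - s->start;
		printf("last payload word: %llu\n", buf[3]);	/* 2 */
		return 0;
	}
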