#else
#error edit for your odd byteorder.
#endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
__u32 hi;
__u64 lo;
#endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
struct bkey {
/* Size of combined key and value, in u64s */
__u8 pad[1];
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bkey_packed {
__u64 _data[0];
* to the same size as struct bkey should hopefully be safest.
*/
__u8 pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX U8_MAX
struct bch_csum {
__le64 lo;
__le64 hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_EXTENT_ENTRY_TYPES() \
x(ptr, 0) \
_compressed_size:7,
type:2;
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC32_SIZE_MAX (1U << 7)
#define CRC32_NONCE_MAX 0
type:3;
#endif
__u64 csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC64_SIZE_MAX (1U << 9)
#define CRC64_NONCE_MAX ((1U << 10) - 1)
type:4;
#endif
struct bch_csum csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC128_SIZE_MAX (1U << 13)
#define CRC128_NONCE_MAX ((1U << 13) - 1)
cached:1,
type:1;
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 _data[0];
struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_btree_ptr_v2 {
struct bch_val v;
struct bpos min_key;
__u64 _data[0];
struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
__u64 _data[0];
union bch_extent_entry start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_reservation {
struct bch_val v;
__le32 generation;
__u8 nr_replicas;
__u8 pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
__le32 bi_flags;
__le16 bi_mode;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_inode_v2 {
struct bch_val v;
__le64 bi_flags;
__le16 bi_mode;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_inode_generation {
struct bch_val v;
__le32 bi_generation;
__le32 pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
* bi_subvol and bi_parent_subvol are only set for subvolume roots:
__u8 d_type;
__u8 d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define DT_SUBVOL 16
#define BCH_DT_MAX 17
__u8 x_name_len;
__le16 x_val_len;
__u8 x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Bucket/allocation information: */
__u8 fields;
__u8 gen;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V1() \
x(read_time, 16) \
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V2() \
x(read_time, 64) \
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_alloc_v4 {
struct bch_val v;
__u32 stripe;
__u32 nr_external_backpointers;
struct bpos backpointers[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
struct bch_quota {
struct bch_val v;
struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Erasure coding */
__u8 pad;
struct bch_extent_ptr ptrs[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Reflink: */
*/
__le32 front_pad;
__le32 back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_reflink_v {
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_indirect_inline_data {
struct bch_val v;
struct bch_lru {
struct bch_val v;
__le64 idx;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define LRU_ID_STRIPES (1U << 16)
__u8 data_type;
__u8 nr_devs;
__u8 devs[];
-} __attribute__((packed));
+} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
struct bch_replicas_entry_v0 entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
__u8 devs[];
-} __attribute__((packed));
+} __packed;
#define replicas_entry_bytes(_i) \
(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
struct bch_sb_field_replicas {
struct bch_sb_field field;
struct bch_replicas_entry entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
struct bch_sb_field_quota {
struct bch_sb_field field;
struct bch_sb_quota_type q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_disk_groups: */
struct bch_disk_group {
__u8 label[BCH_SB_LABEL_SIZE];
__le64 flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
struct bch_sb_field_disk_groups {
struct bch_sb_field field;
struct bch_disk_group entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_counters */
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_SB_LAYOUT_SECTOR 7
struct bch_sb_field start[0];
__le64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
* Flags:
struct jset_entry_usage {
struct jset_entry entry;
__le64 v;
-} __attribute__((packed));
+} __packed;
struct jset_entry_data_usage {
struct jset_entry entry;
__le64 v;
struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
struct jset_entry_clock {
struct jset_entry entry;
__u8 rw;
__u8 pad[7];
__le64 time;
-} __attribute__((packed));
+} __packed;
struct jset_entry_dev_usage_type {
__le64 buckets;
__le64 sectors;
__le64 fragmented;
-} __attribute__((packed));
+} __packed;
struct jset_entry_dev_usage {
struct jset_entry entry;
__le64 _buckets_unavailable; /* No longer used */
struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
struct jset_entry_log {
struct jset_entry entry;
u8 d[];
-} __attribute__((packed));
+} __packed;
/*
* On disk format for a journal entry:
struct jset_entry start[0];
__u64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
struct bkey_packed start[0];
__u64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#endif /* _BCACHEFS_FORMAT_H */
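For reference, __packed and __aligned(x) are the kernel's existing shorthands for the same compiler attributes as the open-coded form (include/linux/compiler_attributes.h defines them as __attribute__((__packed__)) and __attribute__((__aligned__(x)))), so this conversion does not change any on-disk struct layout. The standalone sketch below is illustrative only and not part of the patch; the macro definitions mirror compiler_attributes.h and the struct is a made-up example, not one taken from bcachefs_format.h. It just shows that both spellings produce identical size and alignment:

/*
 * Illustrative sketch, not part of the patch: macro definitions mirror
 * include/linux/compiler_attributes.h; the struct is a hypothetical
 * example, not a bcachefs on-disk structure.
 */
#include <stdint.h>
#include <stdio.h>

#define __packed	__attribute__((__packed__))
#define __aligned(x)	__attribute__((__aligned__(x)))

struct old_style {
	uint64_t	lo;
	uint32_t	hi;
} __attribute__((packed, aligned(8)));

struct new_style {
	uint64_t	lo;
	uint32_t	hi;
} __packed __aligned(8);

int main(void)
{
	/*
	 * Both report size 16, alignment 8: packed alone would shrink
	 * sizeof to 12, aligned(8) rounds it back up to a multiple of 8
	 * and raises the type's alignment to 8.
	 */
	printf("old: size=%zu align=%zu\n",
	       sizeof(struct old_style), _Alignof(struct old_style));
	printf("new: size=%zu align=%zu\n",
	       sizeof(struct new_style), _Alignof(struct new_style));
	return 0;
}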