__u64 mem_ptr;
__le64 seq;
__le16 sectors_written;
- /* In case we ever decide to do variable size btree nodes: */
- __le16 sectors;
+ __le16 flags;
struct bpos min_key;
__u64 _data[0];
struct bch_extent_ptr start[];
} __attribute__((packed, aligned(8)));
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
+
struct bch_extent {
struct bch_val v;
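LE16_BITMASK() generates a getter/setter pair over a little-endian bitfield, so the line added above gives us BTREE_PTR_RANGE_UPDATED(bp) to read bit 0 of the new flags word and SET_BTREE_PTR_RANGE_UPDATED(bp, v) to write it. Below is a minimal self-contained sketch of that pattern, assuming a little-endian host (so no byte swapping is modeled) and a stand-in one-field struct; it mirrors what the kernel macro does, not its exact definition:

#include <assert.h>
#include <stdint.h>

/* Stand-in for the real struct; only the flags word matters here. */
struct bch_btree_ptr_v2 { uint16_t flags; };

/* Generate name() and SET_name() accessors for bits [lo, hi) of a
 * 16-bit field (modeled on bcachefs's LE16_BITMASK(); endian conversion
 * omitted since we assume a little-endian host). */
#define LE16_BITMASK(name, type, field, lo, hi)				\
static inline uint64_t name(const type *k)				\
{									\
	return (k->field >> (lo)) & ~(~0ULL << ((hi) - (lo)));		\
}									\
static inline void SET_##name(type *k, uint64_t v)			\
{									\
	uint16_t tmp = k->field;					\
	tmp &= ~(~(~0ULL << ((hi) - (lo))) << (lo));			\
	tmp |= (v & ~(~0ULL << ((hi) - (lo)))) << (lo);			\
	k->field = tmp;							\
}									\
_Static_assert((hi) <= 16, "bitmask must fit in 16 bits")

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);

int main(void)
{
	struct bch_btree_ptr_v2 bp = { .flags = 0 };

	assert(!BTREE_PTR_RANGE_UPDATED(&bp));
	SET_BTREE_PTR_RANGE_UPDATED(&bp, 1);
	assert(BTREE_PTR_RANGE_UPDATED(&bp));
	return 0;
}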
struct bch_btree_ptr_v2 *bp =
&bkey_i_to_btree_ptr_v2(&b->key)->v;
+ if (BTREE_PTR_RANGE_UPDATED(bp)) {
+ b->data->min_key = bp->min_key;
+ b->data->max_key = b->key.k.p;
+ }
+
btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
	     BTREE_ERR_MUST_RETRY, c, b, NULL,
	     "incorrect min_key: got %llu:%llu should be %llu:%llu",
	     b->data->min_key.inode, b->data->min_key.offset,
	     bp->min_key.inode, bp->min_key.offset);
bp->v.mem_ptr = 0;
bp->v.seq = b->data->keys.seq;
bp->v.sectors_written = 0;
- bp->v.sectors = cpu_to_le16(c->opts.btree_node_size);
}
if (c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))
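Nothing in this section shows who sets the flag. Purely for illustration, a hypothetical producer in kernel idiom: a repair path that narrows a node's range without rewriting the node would copy the existing key, move min_key, and mark the range as updated so the read path above trusts the key over the header. bkey_copy() and the SET_ accessor generated by the bitmask are real; this helper and its caller are not part of this patch:

/* Hypothetical sketch, not from this patch: narrow a btree node's range
 * by rewriting only its pointer. Assumes the existing key is already a
 * btree_ptr_v2. */
static void btree_ptr_set_min_key(struct bkey_i_btree_ptr_v2 *new,
				  const struct bkey_i *old,
				  struct bpos new_min)
{
	bkey_copy(&new->k_i, old);	/* copy key and value */
	new->v.min_key = new_min;	/* new, narrower range */
	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
}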
void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- pr_buf(out, "seq %llx sectors %u written %u min_key ",
+ pr_buf(out, "seq %llx written %u min_key ",
le64_to_cpu(bp.v->seq),
- le16_to_cpu(bp.v->sectors),
le16_to_cpu(bp.v->sectors_written));
bch2_bpos_to_text(out, bp.v->min_key);

unsigned nonce = UINT_MAX;
unsigned i;
- if (k.k->type == KEY_TYPE_btree_ptr)
+ if (k.k->type == KEY_TYPE_btree_ptr ||
+ k.k->type == KEY_TYPE_btree_ptr_v2)
size_ondisk = c->opts.btree_node_size;
- if (k.k->type == KEY_TYPE_btree_ptr_v2)
- size_ondisk = le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors);
bkey_extent_entry_for_each(ptrs, entry) {
if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)