struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
+ unsigned offset = __btree_node_key_to_offset(b, k);
struct bset_tree *t;
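+ /* bset_trees are in memory order, so return the first one that ends at or past the key's offset: */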
for_each_bset(b, t)
- if (k >= btree_bkey_first(b, t) &&
- k < btree_bkey_last(b, t))
+ if (offset <= t->end_offset) {
+ EBUG_ON(offset < btree_bkey_first_offset(t));
return t;
+ }
BUG();
}
void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
struct btree *b)
{
- struct btree_node_iter_set *set, *prev = NULL;
+ struct btree_node_iter_set *set, *s2;
struct bset_tree *t;
- struct bkey_packed *k, *first;
- if (bch2_btree_node_iter_end(iter))
- return;
+ /* Verify no duplicates: */
+ btree_node_iter_for_each(iter, set)
+ btree_node_iter_for_each(iter, s2)
+ BUG_ON(set != s2 && set->end == s2->end);
+ /* Verify that set->end is correct: */
btree_node_iter_for_each(iter, set) {
- k = __btree_node_offset_to_key(b, set->k);
- t = bch2_bkey_to_bset(b, k);
-
- BUG_ON(__btree_node_offset_to_key(b, set->end) !=
- btree_bkey_last(b, t));
-
- BUG_ON(prev &&
- btree_node_iter_cmp(iter, b, *prev, *set) > 0);
-
- prev = set;
+ for_each_bset(b, t)
+ if (set->end == t->end_offset)
+ goto found;
+ BUG();
+found:
+ BUG_ON(set->k < btree_bkey_first_offset(t) ||
+ set->k >= t->end_offset);
}
- first = __btree_node_offset_to_key(b, iter->data[0].k);
-
- for_each_bset(b, t)
- if (bch2_btree_node_iter_bset_pos(iter, b, t) ==
- btree_bkey_last(b, t) &&
- (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))))
- BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
- k, first) > 0);
+ /* Verify iterator is sorted: */
+ btree_node_iter_for_each(iter, set)
+ BUG_ON(set != iter->data &&
+ btree_node_iter_cmp(iter, b, set[-1], set[0]) > 0);
}
void bch2_verify_key_order(struct btree *b,
if (mode == COMPACT_LAZY) {
if (should_compact_bset_lazy(b, t) ||
- (compacting && bset_unwritten(b, bset(b, t))))
+ (compacting && !bset_written(b, bset(b, t))))
return dead_u64s;
} else {
if (bset_written(b, bset(b, t)))
struct bkey_packed *k, *n, *out, *start, *end;
struct btree_node_entry *src = NULL, *dst = NULL;
- if (t != b->set && bset_unwritten(b, i)) {
+ if (t != b->set && !bset_written(b, i)) {
src = container_of(i, struct btree_node_entry, keys);
dst = max(write_block(b),
(void *) btree_bkey_last(b, t - 1));
continue;
if (bkey_whiteout(k)) {
- unreserve_whiteout(b, t, k);
+ unreserve_whiteout(b, k);
memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
set_bkeyp_val_u64s(f, u_pos, 0);
u_pos = bkey_next(u_pos);
start = btree_bkey_first(b, t);
end = btree_bkey_last(b, t);
- if (bset_unwritten(b, i) &&
+ if (!bset_written(b, i) &&
t != b->set) {
struct bset *dst =
max_t(struct bset *, write_block(b),
for (unwritten_idx = 0;
unwritten_idx < b->nsets;
unwritten_idx++)
- if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
+ if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
break;
if (b->nsets - unwritten_idx > 1) {
for_each_bset(b, t)
bch2_bset_build_aux_tree(b, t,
- bset_unwritten(b, bset(b, t)) &&
+ !bset_written(b, bset(b, t)) &&
t == bset_tree_last(b));
}
clear_btree_node_just_written(b);
/*
- * Note: immediately after write, bset_unwritten()/bset_written() don't
- * work - the amount of data we had to write after compaction might have
- * been smaller than the offset of the last bset.
+ * Note: immediately after write, bset_written() doesn't work - the
+ * amount of data we had to write after compaction might have been
+ * smaller than the offset of the last bset.
*
* However, we know that all bsets have been written here, as long as
* we're still holding the write lock:
return b->set + b->nsets - 1;
}
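+/*
+ * Helpers for converting between pointers into a btree node and the u16,
+ * u64-granularity offsets stored by bset_tree and btree_node_iter:
+ */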
+static inline void *
+__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
+{
+ return (void *) ((u64 *) b->data + 1 + offset);
+}
+
+static inline u16
+__btree_node_ptr_to_offset(const struct btree *b, const void *p)
+{
+ u16 ret = (u64 *) p - 1 - (u64 *) b->data;
+
+ EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
+ return ret;
+}
+
static inline struct bset *bset(const struct btree *b,
const struct bset_tree *t)
{
- return (void *) b->data + t->data_offset * sizeof(u64);
+ return __btree_node_offset_to_ptr(b, t->data_offset);
+}
+
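+/* Refresh t->end_offset so it points at the current end of the bset's keys: */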
+static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+{
+ t->end_offset =
+ __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+ const struct bset *i)
+{
+ t->data_offset = __btree_node_ptr_to_offset(b, i);
+ set_btree_bset_end(b, t);
}
static inline struct bset *btree_bset_first(struct btree *b)
static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
- size_t ret = (u64 *) k - (u64 *) b->data - 1;
-
- EBUG_ON(ret > U16_MAX);
- return ret;
+ return __btree_node_ptr_to_offset(b, k);
}
static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
- return (void *) ((u64 *) b->data + k + 1);
+ return __btree_node_offset_to_ptr(b, k);
}
static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}
-#define btree_bkey_first(_b, _t) (bset(_b, _t)->start)
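+/* The EBUG_ON verifies that the pointer-based and offset-based views of the first key agree: */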
+#define btree_bkey_first(_b, _t) \
+({ \
+ EBUG_ON(bset(_b, _t)->start != \
+ __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
+ \
+ bset(_b, _t)->start; \
+})
#define btree_bkey_last(_b, _t) \
({ \
__btree_node_offset_to_key(_b, (_t)->end_offset); \
})
-static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
-{
- t->end_offset =
- __btree_node_key_to_offset(b, vstruct_last(bset(b, t)));
- btree_bkey_last(b, t);
-}
-
-static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
- const struct bset *i)
-{
- t->data_offset = (u64 *) i - (u64 *) b->data;
-
- EBUG_ON(bset(b, t) != i);
-
- set_btree_bset_end(b, t);
-}
-
static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
return i - (void *) b->data;
return (void *) b->data + (b->written << 9);
}
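+/* Has the part of the node containing @p already been written out? */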
+static inline bool __btree_addr_written(struct btree *b, void *p)
+{
+ return p < write_block(b);
+}
+
static inline bool bset_written(struct btree *b, struct bset *i)
{
- return (void *) i < write_block(b);
+ return __btree_addr_written(b, i);
}
-static inline bool bset_unwritten(struct btree *b, struct bset *i)
+static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
- return (void *) i > write_block(b);
+ return __btree_addr_written(b, k);
}
static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
return NULL;
}
-static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
+static inline void unreserve_whiteout(struct btree *b, struct bkey_packed *k)
{
- if (bset_written(b, bset(b, t))) {
+ if (bkey_written(b, k)) {
EBUG_ON(b->uncompacted_whiteout_u64s <
bkeyp_key_u64s(&b->format, k));
b->uncompacted_whiteout_u64s -=
}
}
-static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
+static inline void reserve_whiteout(struct btree *b, struct bkey_packed *k)
{
- if (bset_written(b, bset(b, t))) {
+ if (bkey_written(b, k)) {
BUG_ON(!k->needs_whiteout);
b->uncompacted_whiteout_u64s +=
bkeyp_key_u64s(&b->format, k);
t = bch2_bkey_to_bset(b, k);
- if (bset_unwritten(b, bset(b, t)) &&
+ if (!bkey_written(b, k) &&
bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
!bkey_whiteout(&insert->k)) {
k->type = insert->k.type;
k->u64s, k->u64s);
if (bkey_whiteout(&insert->k)) {
- reserve_whiteout(b, t, k);
+ reserve_whiteout(b, k);
return true;
} else {
k->needs_whiteout = false;
* what k points to)
*/
bkey_reassemble(&split.k, k.s_c);
- split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
+ split.k.k.needs_whiteout |= bkey_written(b, _k);
bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
bch2_subtract_sectors(s, k.s_c,
bkey_start_offset(k.k), k.k->size);
_k->type = KEY_TYPE_DISCARD;
- reserve_whiteout(b, t, _k);
+ reserve_whiteout(b, _k);
} else if (k.k->needs_whiteout ||
- bset_written(b, bset(b, t))) {
+ bkey_written(b, _k)) {
struct bkey_i discard = *insert;
discard.k.type = KEY_TYPE_DISCARD;
break;
if (k.k->size &&
- (k.k->needs_whiteout || bset_written(b, bset(b, t))))
+ (k.k->needs_whiteout || bkey_written(b, _k)))
insert->k.needs_whiteout = true;
if (overlap == BCH_EXTENT_OVERLAP_ALL &&
bkey_whiteout(k.k) &&
k.k->needs_whiteout) {
- unreserve_whiteout(b, t, _k);
+ unreserve_whiteout(b, _k);
_k->needs_whiteout = false;
}
squash: