void bch2_bset_init_next(struct bch_fs *, struct btree *,
struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
-void bch2_bset_fix_invalidated_key(struct btree *, struct bset_tree *,
- struct bkey_packed *);
+void bch2_bset_fix_invalidated_key(struct btree *, struct bkey_packed *);
void bch2_bset_insert(struct btree *, struct btree_node_iter *,
struct bkey_packed *, struct bkey_i *, unsigned);
(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}
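+/*
+ * Returns the bset_tree that contains the given key, by walking the node's
+ * bsets and comparing the key's offset against each bset's end_offset:
+ */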
+static inline struct bset_tree *
+bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
+{
+ unsigned offset = __btree_node_key_to_offset(b, k);
+ struct bset_tree *t;
+
+ for_each_bset(b, t)
+ if (offset <= t->end_offset) {
+ EBUG_ON(offset < btree_bkey_first_offset(t));
+ return t;
+ }
+
+ BUG();
+}
+
struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);
struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
#define btree_keys_account_key_drop(_nr, _bset_idx, _k) \
btree_keys_account_key(_nr, _bset_idx, _k, -1)
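+/*
+ * Wrappers around btree_keys_account_key() that look up which bset the key
+ * lives in, so callers no longer need to pass a bset_tree:
+ */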
+#define btree_account_key_add(_b, _k) \
+ btree_keys_account_key(&(_b)->nr, \
+ bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
+#define btree_account_key_drop(_b, _k) \
+ btree_keys_account_key(&(_b)->nr, \
+ bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)
+
struct bset_stats {
struct {
size_t nr, bytes;
{
const struct bkey_format *f = &b->format;
struct bkey_packed *k;
- struct bset_tree *t;
unsigned clobber_u64s;
EBUG_ON(btree_node_just_written(b));
if (k && !bkey_cmp_packed(b, k, &insert->k)) {
BUG_ON(bkey_whiteout(k));
- t = bch2_bkey_to_bset(b, k);
-
if (!bkey_written(b, k) &&
bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
!bkey_whiteout(&insert->k)) {
insert->k.needs_whiteout = k->needs_whiteout;
- btree_keys_account_key_drop(&b->nr, t - b->set, k);
+ btree_account_key_drop(b, k);
- if (t == bset_tree_last(b)) {
+ if (k >= btree_bset_last(b)->start) {
clobber_u64s = k->u64s;
/*
*/
if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
bch2_bset_delete(b, k, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter, t,
+ bch2_btree_node_iter_fix(iter, b, node_iter,
k, clobber_u64s, 0);
bch2_btree_iter_verify(iter, b);
return true;
}
k->type = KEY_TYPE_DELETED;
- bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
+ bch2_btree_node_iter_fix(iter, b, node_iter, k,
k->u64s, k->u64s);
bch2_btree_iter_verify(iter, b);
insert->k.needs_whiteout = false;
}
- t = bset_tree_last(b);
- k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
+ k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
clobber_u64s = 0;
overwrite:
bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
- bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
+ bch2_btree_node_iter_fix(iter, b, node_iter, k,
clobber_u64s, k->u64s);
bch2_btree_iter_verify(iter, b);
return true;
struct bkey_i *insert)
{
struct btree_iter_level *l = &iter->l[0];
- struct bset_tree *t = bset_tree_last(l->b);
struct btree_node_iter node_iter;
struct bkey_packed *k;
bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
return;
- k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, t);
+ k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
bch2_bset_insert(l->b, &l->iter, k, insert, 0);
- bch2_btree_node_iter_fix(iter, l->b, &l->iter, t, k, 0, k->u64s);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
bch2_btree_iter_verify(iter, l->b);
}
static void
extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
- struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
+ struct bkey_packed *_k, struct bkey_s k,
enum bch_extent_overlap overlap)
{
struct bch_fs *c = s->trans->c;
struct btree_iter *iter = s->insert->iter;
struct btree_iter_level *l = &iter->l[0];
- struct btree *b = l->b;
switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
bch2_cut_subtract_front(s, insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
- extent_save(b, _k, k.k);
+ extent_save(l->b, _k, k.k);
verify_modified_extent(iter, _k);
break;
/* insert overlaps with end of k: */
bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
BUG_ON(bkey_deleted(k.k));
- extent_save(b, _k, k.k);
+ extent_save(l->b, _k, k.k);
/*
* As the auxiliary tree is indexed by the end of the
* key and we've just changed the end, update the
* auxiliary tree.
*/
- bch2_bset_fix_invalidated_key(b, t, _k);
- bch2_btree_node_iter_fix(iter, b, &l->iter, t,
+ bch2_bset_fix_invalidated_key(l->b, _k);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
verify_modified_extent(iter, _k);
break;
case BCH_EXTENT_OVERLAP_ALL: {
/* The insert key completely covers k, invalidate k */
if (!bkey_whiteout(k.k))
- btree_keys_account_key_drop(&b->nr,
- t - b->set, _k);
+ btree_account_key_drop(l->b, _k);
bch2_drop_subtract(s, k);
- if (t == bset_tree_last(l->b)) {
+ if (_k >= btree_bset_last(l->b)->start) {
unsigned u64s = _k->u64s;
bch2_bset_delete(l->b, _k, _k->u64s);
- bch2_btree_node_iter_fix(iter, b, &l->iter, t,
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, u64s, 0);
- bch2_btree_iter_verify(iter, b);
+ bch2_btree_iter_verify(iter, l->b);
} else {
- extent_save(b, _k, k.k);
- bch2_btree_node_iter_fix(iter, b, &l->iter, t,
+ extent_save(l->b, _k, k.k);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
verify_modified_extent(iter, _k);
}
* what k points to)
*/
bkey_reassemble(&split.k, k.s_c);
- split.k.k.needs_whiteout |= bkey_written(b, _k);
+ split.k.k.needs_whiteout |= bkey_written(l->b, _k);
bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
bch2_cut_subtract_front(s, insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
- extent_save(b, _k, k.k);
+ extent_save(l->b, _k, k.k);
verify_modified_extent(iter, _k);
bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
{
struct btree_iter *iter = s->insert->iter;
struct btree_iter_level *l = &iter->l[0];
- struct btree *b = l->b;
struct bkey_packed *_k;
struct bkey unpacked;
struct bkey_i *insert = s->insert->k;
while (bkey_cmp(s->committed, insert->k.p) < 0 &&
- (_k = bch2_btree_node_iter_peek_filter(&l->iter, b,
+ (_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
KEY_TYPE_DISCARD))) {
- struct bset_tree *t = bch2_bkey_to_bset(b, _k);
- struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
+ struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);
EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
!bkey_cmp(insert->k.p, k.k->p) &&
!bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
if (!bkey_whiteout(k.k)) {
- btree_keys_account_key_drop(&b->nr, t - b->set, _k);
+ btree_account_key_drop(l->b, _k);
bch2_subtract_sectors(s, k.s_c,
bkey_start_offset(k.k), k.k->size);
_k->type = KEY_TYPE_DISCARD;
- reserve_whiteout(b, _k);
+ reserve_whiteout(l->b, _k);
}
break;
}
- if (k.k->needs_whiteout || bkey_written(b, _k)) {
+ if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
insert->k.needs_whiteout = true;
s->update_btree = true;
}
overlap == BCH_EXTENT_OVERLAP_ALL &&
bkey_whiteout(k.k) &&
k.k->needs_whiteout) {
- unreserve_whiteout(b, _k);
+ unreserve_whiteout(l->b, _k);
_k->needs_whiteout = false;
}
- extent_squash(s, insert, t, _k, k, overlap);
+ extent_squash(s, insert, _k, k, overlap);
if (!s->update_btree)
bch2_cut_front(s->committed, insert);
}
if (bkey_cmp(s->committed, insert->k.p) < 0)
- s->committed = bpos_min(s->insert->k->k.p, b->key.k.p);
+ s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);
/*
* may have skipped past some deleted extents greater than the insert
struct btree_node_iter node_iter = l->iter;
while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
- bkey_cmp_left_packed(b, _k, &s->committed) > 0)
+ bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
l->iter = node_iter;
}
}
return false;
}
- bch2_bset_fix_invalidated_key(b, t, m);
+ bch2_bset_fix_invalidated_key(b, m);
bch2_btree_node_iter_fix(iter, b, node_iter,
- t, m, m->u64s, m->u64s);
+ m, m->u64s, m->u64s);
verify_modified_extent(iter, m);
return ret == BCH_MERGE_MERGE;