/*
 * NOTE(review): diff-style hunk, reproduced verbatim ('-' = removed,
 * '+' = added); the comments here are review annotations only.
 *
 * btree_key_can_insert() — can @insert fit into the btree leaf backing
 * @iter?  Returns BTREE_INSERT_OK, or BTREE_INSERT_BTREE_NODE_FULL to
 * force a split/rewrite of the node.
 *
 * What this hunk changes:
 *  - @u64s becomes pass-by-value: this function no longer writes an
 *    adjusted size back to the caller.
 *  - the 'static' result variable is deleted — a static local here was
 *    shared by all concurrent callers (a real data race), so this is a
 *    correctness fix, not just cleanup.
 *  - the extent admission check (bch2_extent_can_insert()) is moved out
 *    of this function to an earlier point in the update path — TODO
 *    confirm against the matching '+' hunk adding that call.
 */
btree_key_can_insert(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert,
- unsigned *u64s)
+ unsigned u64s)
{
struct bch_fs *c = trans->c;
struct btree *b = iter_l(iter)->b;
- static enum btree_insert_ret ret;
/* fake / old-extent-format nodes can't take in-place inserts */
if (unlikely(btree_node_fake(b)))
return BTREE_INSERT_BTREE_NODE_FULL;
if (unlikely(btree_node_old_extent_overwrite(b)))
return BTREE_INSERT_BTREE_NODE_FULL;
- ret = !(iter->flags & BTREE_ITER_IS_EXTENTS)
- ? BTREE_INSERT_OK
- : bch2_extent_can_insert(trans, iter, insert);
- if (ret)
- return ret;
-
- if (*u64s > bch_btree_keys_u64s_remaining(c, b))
+ if (unlikely(u64s > bch_btree_keys_u64s_remaining(c, b)))
return BTREE_INSERT_BTREE_NODE_FULL;
return BTREE_INSERT_OK;
/*
 * NOTE(review): diff-style hunk, verbatim, with review comments added.
 *
 * btree_key_can_insert_cached() — same admission check for the key
 * cache: if the new key is larger than the cached key's current buffer,
 * the buffer is reallocated, rounded up to a power of two to amortize
 * future growth.  @u64s becomes pass-by-value here too, matching
 * btree_key_can_insert().  (new_k's declaration is in elided lines.)
 */
btree_key_can_insert_cached(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert,
- unsigned *u64s)
+ unsigned u64s)
{
struct bkey_cached *ck = (void *) iter->l[0].b;
unsigned new_u64s;
/* key cache iterators only exist at level 0 */
BUG_ON(iter->level);
- if (*u64s <= ck->u64s)
+ if (u64s <= ck->u64s)
return BTREE_INSERT_OK;
- new_u64s = roundup_pow_of_two(*u64s);
+ new_u64s = roundup_pow_of_two(u64s);
/* GFP_NOFS — presumably fs locks may be held here; on failure ck->k stays valid */
new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
if (!new_k)
return -ENOMEM;
/*
 * NOTE(review): caller-side hunk — the accumulated u64s total is now
 * passed by value to the can_insert helpers (matching their new
 * signatures), dispatching on whether this update's iterator is a
 * cached one.  On failure, *stopped_at records which update failed.
 */
u64s += i->k->k.u64s;
ret = btree_iter_type(i->iter) != BTREE_ITER_CACHED
- ? btree_key_can_insert(trans, i->iter, i->k, &u64s)
- : btree_key_can_insert_cached(trans, i->iter, i->k, &u64s);
+ ? btree_key_can_insert(trans, i->iter, i->k, u64s)
+ : btree_key_can_insert_cached(trans, i->iter, i->k, u64s);
if (ret) {
*stopped_at = i;
return ret;
/*
 * NOTE(review): this hunk adds the extent admission check
 * (bch2_extent_can_insert()) early in the update path — the
 * counterpart of its removal from btree_key_can_insert().  It now runs
 * once per update, before the deleted-key fast path, against the
 * original iterator (orig_iter is declared in elided lines).
 */
struct bkey_i *insert)
{
struct btree_iter *iter;
+ int ret;
+
+ ret = bch2_extent_can_insert(trans, orig_iter, insert);
+ if (ret)
+ return ret;
/* whiteout/deleted key: nothing to overwrite, nothing to do */
if (bkey_deleted(&insert->k))
return 0;
/*
 * NOTE(review): converts raw iter->l[0] accesses to the iter_l()
 * accessor so the correct leaf-level entry is used uniformly —
 * consistent with the other hunks in this patch.
 */
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
+ struct btree *b = iter_l(iter)->b;
+ struct btree_node_iter node_iter = iter_l(iter)->iter;
struct bkey_packed *_k;
int ret = 0;
disk_res_sectors);
/*
 * NOTE(review): error-path dump of the keys overlapping each pending
 * update.  This hunk makes it aware of cached iterators: for a regular
 * btree iterator it walks the node (now via iter_l()) printing each
 * overlapping key until it passes the update's position; for a
 * BTREE_ITER_CACHED iterator there is no node to walk, so it prints
 * the single cached key instead.  The per-update locals move inside
 * the non-cached branch where they are actually used.
 */
trans_for_each_update(trans, i) {
- struct btree_iter *iter = i->iter;
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
- struct bkey_packed *_k;
-
pr_err("while inserting");
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
pr_err("%s", buf);
pr_err("overlapping with");
- node_iter = iter->l[0].iter;
- while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
- struct bkey unpacked;
- struct bkey_s_c k;
+ if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
+ struct btree *b = iter_l(i->iter)->b;
+ struct btree_node_iter node_iter = iter_l(i->iter)->iter;
+ struct bkey_packed *_k;
- k = bkey_disassemble(b, _k, &unpacked);
+ while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
+ struct bkey unpacked;
+ struct bkey_s_c k;
/*
 * NOTE(review): the added pr_info("%px ...") looks like leftover
 * debugging — %px prints unhashed kernel pointers; consider dropping
 * (or switching to %p) before merge.
 */
+ pr_info("_k %px format %u", _k, _k->format);
+ k = bkey_disassemble(b, _k, &unpacked);
- if (btree_node_is_extents(b)
- ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
- : bkey_cmp(i->k->k.p, k.k->p))
- break;
/* stop once past the update's position (extents compare by start pos) */
+ if (btree_node_is_extents(b)
+ ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
+ : bkey_cmp(i->k->k.p, k.k->p))
+ break;
+
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ pr_err("%s", buf);
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- pr_err("%s", buf);
- bch2_btree_node_iter_advance(&node_iter, b);
+ bch2_btree_node_iter_advance(&node_iter, b);
+ }
+ } else {
/* cached iterator: the backing "node" pointer is really a bkey_cached */
+ struct bkey_cached *ck = (void *) i->iter->l[0].b;
+
+ bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
+ pr_err("%s", buf);
}
}
}
/*
 * NOTE(review): same iter->l[0] → iter_l() accessor conversion as the
 * earlier hunks, applied to another function in the update path.
 */
struct bkey_i *insert,
unsigned flags)
{
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
+ struct btree *b = iter_l(iter)->b;
+ struct btree_node_iter node_iter = iter_l(iter)->iter;
struct bkey_packed *_k;
int ret;