unsigned used_mempool:1;
unsigned error:1;
unsigned in_traverse_all:1;
+ /*
+ * Extra sectors of disk reservation to add in __bch2_trans_commit(),
+ * for when bch2_trans_update() notices we'll be splitting a
+ * compressed extent:
+ */
+ unsigned extra_journal_res;
u64 iters_linked;
u64 iters_live;
BTREE_INSERT_OK,
/* leaf node needs to be split */
BTREE_INSERT_BTREE_NODE_FULL,
- BTREE_INSERT_ENOSPC,
BTREE_INSERT_NEED_MARK_REPLICAS,
BTREE_INSERT_NEED_JOURNAL_RES,
BTREE_INSERT_NEED_JOURNAL_RECLAIM,
ret = -EINTR;
}
break;
- case BTREE_INSERT_ENOSPC:
- BUG_ON(flags & BTREE_INSERT_NOFAIL);
- ret = -ENOSPC;
- break;
case BTREE_INSERT_NEED_MARK_REPLICAS:
bch2_trans_unlock(trans);
struct bpos start = bkey_start_pos(&i->k->k);
struct bkey_i *update;
struct bkey_s_c k;
- int ret = 0;
+ int ret = 0, compressed_sectors;
iter = bch2_trans_get_iter(trans, i->btree_id, start,
BTREE_ITER_INTENT|
}
while (bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) > 0) {
+ /*
+ * If we're going to be splitting a compressed extent, note it
+ * so that __bch2_trans_commit() can increase our disk
+ * reservation:
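+ *
+ * (Both fragments of a split still reference the entire compressed
+ * blob on disk, since compressed data can't be partially freed, so
+ * reserving the full bch2_bkey_sectors_compressed(k) should cover
+ * the worst-case extra accounting for the second fragment.)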
+ */
+ if (bkey_cmp(bkey_start_pos(k.k), start) < 0 &&
+ bkey_cmp(k.k->p, i->k->k.p) > 0 &&
+ (compressed_sectors = bch2_bkey_sectors_compressed(k)))
+ trans->extra_journal_res += compressed_sectors;
+
if (bkey_cmp(bkey_start_pos(k.k), start) < 0) {
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
if ((ret = PTR_ERR_OR_ZERO(update)))
trans->journal_preres_u64s += u64s;
trans->journal_u64s += u64s;
}
+
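+ /*
+ * If extent_handle_overwrites() noted that we'll be splitting a
+ * compressed extent, take the extra disk reservation up front, before
+ * the journal reservation and its retry loop below:
+ */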
+ if (trans->extra_journal_res) {
+ ret = bch2_disk_reservation_add(trans->c, trans->disk_res,
+ trans->extra_journal_res,
+ (trans->flags & BTREE_INSERT_NOFAIL)
+ ? BCH_DISK_RESERVATION_NOFAIL : 0);
+ if (ret)
+ goto err;
+ }
retry:
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
#ifdef CONFIG_BCACHEFS_DEBUG
- BUG_ON(bkey_cmp(iter->pos,
- is_extent ? bkey_start_pos(&k->k) : k->k.p));
-
- trans_for_each_update(trans, i) {
- BUG_ON(bkey_cmp(i->iter->pos, i->k->k.p));
-
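+ /* The update list must stay sorted, with no duplicates: */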
+ trans_for_each_update(trans, i)
BUG_ON(i != trans->updates &&
btree_insert_entry_cmp(i - 1, i) >= 0);
- }
#endif
if (is_extent) {
- ret = bch2_extent_can_insert(trans, n.iter, n.k);
- if (ret)
- return ret;
-
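+ /*
+ * extent_handle_overwrites() also notes, in trans->extra_journal_res,
+ * any extra disk reservation needed for splitting a compressed
+ * extent:
+ */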
ret = extent_handle_overwrites(trans, &n);
if (ret)
return ret;
return !bkey_cmp(end, k->k.p);
}
-
-enum btree_insert_ret
-bch2_extent_can_insert(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *insert)
-{
- struct bkey_s_c k;
- int ret, sectors;
-
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- /* Check if we're splitting a compressed extent: */
-
- if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
- bkey_cmp(insert->k.p, k.k->p) < 0 &&
- (sectors = bch2_bkey_sectors_compressed(k))) {
- int flags = trans->flags & BTREE_INSERT_NOFAIL
- ? BCH_DISK_RESERVATION_NOFAIL : 0;
-
- switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
- sectors, flags)) {
- case 0:
- break;
- case -ENOSPC:
- return BTREE_INSERT_ENOSPC;
- default:
- BUG();
- }
- }
-
- return BTREE_INSERT_OK;
-}