}
struct btree_update *
-bch2_btree_update_start(struct bch_fs *c, enum btree_id id,
+bch2_btree_update_start(struct btree_trans *trans, enum btree_id id,
unsigned nr_nodes, unsigned flags,
struct closure *cl)
{
+ struct bch_fs *c = trans->c;
+ struct journal_preres journal_preres = { 0 };
struct btree_reserve *reserve;
struct btree_update *as;
int ret;
+ ret = bch2_journal_preres_get(&c->journal, &journal_preres,
+ BTREE_UPDATE_JOURNAL_RES,
+ JOURNAL_RES_GET_NONBLOCK);
+ if (ret == -EAGAIN) {
+ /*
+ * Caller asked us not to drop locks; can't block on the
+ * journal preres, so bail out and let them retry:
+ */
+ if (flags & BTREE_INSERT_NOUNLOCK)
+ return ERR_PTR(-EINTR);
+
+ bch2_trans_unlock(trans);
+
+ ret = bch2_journal_preres_get(&c->journal, &journal_preres,
+ BTREE_UPDATE_JOURNAL_RES, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!bch2_trans_relock(trans)) {
+ bch2_journal_preres_put(&c->journal, &journal_preres);
+ return ERR_PTR(-EINTR);
+ }
+ }
+
reserve = bch2_btree_reserve_get(c, nr_nodes, flags, cl);
- if (IS_ERR(reserve))
- return ERR_CAST(reserve);
+ if (IS_ERR(reserve)) {
+ bch2_journal_preres_put(&c->journal, &journal_preres);
+ return ERR_CAST(reserve);
+ }
as->btree_id = id;
as->reserve = reserve;
INIT_LIST_HEAD(&as->write_blocked_list);
+ as->journal_preres = journal_preres;
bch2_keylist_init(&as->parent_keys, as->inline_keys);
- ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
- ARRAY_SIZE(as->journal_entries), 0);
- if (ret) {
- bch2_btree_reserve_put(c, reserve);
- closure_debug_destroy(&as->cl);
- mempool_free(as, &c->btree_interior_update_pool);
- return ERR_PTR(ret);
- }
-
mutex_lock(&c->btree_interior_update_lock);
list_add_tail(&as->list, &c->btree_interior_update_list);
mutex_unlock(&c->btree_interior_update_lock);
goto out;
}
- as = bch2_btree_update_start(c, iter->btree_id,
+ as = bch2_btree_update_start(trans, iter->btree_id,
btree_update_reserve_required(c, b), flags,
!(flags & BTREE_INSERT_NOUNLOCK) ? &cl : NULL);
if (IS_ERR(as)) {
goto err_unlock;
}
- as = bch2_btree_update_start(c, iter->btree_id,
+ as = bch2_btree_update_start(trans, iter->btree_id,
btree_update_reserve_required(c, parent) + 1,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE,
struct btree *n, *parent = btree_node_parent(iter, b);
struct btree_update *as;
- as = bch2_btree_update_start(c, iter->btree_id,
+ as = bch2_btree_update_start(iter->trans, iter->btree_id,
(parent
? btree_update_reserve_required(c, parent)
: 0) + 1,
new_hash = bch2_btree_node_mem_alloc(c);
}
- as = bch2_btree_update_start(c, iter->btree_id,
+ as = bch2_btree_update_start(iter->trans, iter->btree_id,
parent ? btree_update_reserve_required(c, parent) : 0,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
+#define BTREE_UPDATE_JOURNAL_RES \
+ ((BKEY_BTREE_PTR_U64s_MAX + 1) * (BTREE_MAX_DEPTH - 1) * 2)
+
/*
* Tracks an in progress split/rewrite of a btree node and the update to the
* parent node:
unsigned nr_new_nodes;
unsigned journal_u64s;
- u64 journal_entries[
- (BKEY_BTREE_PTR_U64s_MAX + 1) * (BTREE_MAX_DEPTH - 1) * 2];
+ u64 journal_entries[BTREE_UPDATE_JOURNAL_RES];
/* Only here to reduce stack usage on recursive splits: */
struct keylist parent_keys;
void bch2_btree_update_done(struct btree_update *);
struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
+bch2_btree_update_start(struct btree_trans *, enum btree_id, unsigned,
unsigned, struct closure *);
void bch2_btree_interior_update_will_free_node(struct btree_update *,