}
}
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct list_head *freed = pcpu_read_locks
? &bc->freed_pcpu
}
/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
- struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id,
enum six_lock_type lock_type,
bool sync)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
u32 seq;
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
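+	/* path may be NULL here, e.g. when called via bch2_btree_node_get_noiter() */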
- if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+ if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
}
- b = bch2_btree_node_mem_alloc(c, level != 0);
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
- if (trans && b == ERR_PTR(-ENOMEM)) {
+ if (b == ERR_PTR(-ENOMEM)) {
trans->memory_allocation_failure = true;
trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
if (!sync)
return NULL;
- if (trans) {
+ if (path) {
int ret = bch2_trans_relock(trans) ?:
bch2_btree_path_relock_intent(trans, path);
if (ret) {
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) {
- if (trans)
+ if (path)
trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
}
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+ b = bch2_btree_node_fill(trans, path, k, path->btree_id,
level, lock_type, true);
/* We raced and found the btree node in the cache */
if (nofill)
goto out;
- b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+ b = bch2_btree_node_fill(trans, NULL, k, btree_id,
level, SIX_LOCK_read, true);
/* We raced and found the btree node in the cache */
return b;
}
-int bch2_btree_node_prefetch(struct bch_fs *c,
- struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
if (b)
return 0;
- b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+ b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b);
}
int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *, bool);
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
const struct bkey_i *, unsigned,
struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
enum btree_id, unsigned, bool);
-int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *, struct btree_path *,
+int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
const struct bkey_i *, enum btree_id, unsigned);
void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);
}
}
-int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
- const struct bkey_i *k, unsigned level)
+static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
+ const struct bkey_i *k, unsigned level)
{
+ struct bch_fs *c = trans->c;
struct closure cl;
struct btree *b;
int ret;
closure_sync(&cl);
} while (ret);
- b = bch2_btree_node_mem_alloc(c, level != 0);
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
bch2_btree_cache_cannibalize_unlock(c);
BUG_ON(IS_ERR(b));
return ret;
}
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+ const struct bkey_i *k, unsigned level)
+{
+ return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
+}
+
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
struct btree_write *w)
{
break;
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+ ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
path->level - 1);
}
break;
bch2_bkey_buf_reassemble(&tmp, c, k);
- ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+ ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
path->level - 1);
}
bch2_open_bucket_get(c, wp, &ob);
bch2_alloc_sectors_done(c, wp);
mem_alloc:
- b = bch2_btree_node_mem_alloc(c, interior_node);
+ b = bch2_btree_node_mem_alloc(trans, interior_node);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
return ret;
}
- new_hash = bch2_btree_node_mem_alloc(c, false);
+ new_hash = bch2_btree_node_mem_alloc(trans, false);
}
path->intent_ref++;
bch2_btree_set_root_inmem(c, b);
}
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
{
+ struct bch_fs *c = trans->c;
struct closure cl;
struct btree *b;
int ret;
closure_sync(&cl);
} while (ret);
- b = bch2_btree_node_mem_alloc(c, false);
+ b = bch2_btree_node_mem_alloc(trans, false);
bch2_btree_cache_cannibalize_unlock(c);
set_btree_node_fake(b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
+ return 0;
+}
+
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+ bch2_trans_run(c, __bch2_btree_root_alloc(&trans, id));
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)