 	return path;
 }
-struct btree_path *bch2_path_get(struct btree_trans *trans,
-				  enum btree_id btree_id, struct bpos pos,
-				  unsigned locks_want, unsigned level,
-				  unsigned flags, unsigned long ip)
+btree_path_idx_t bch2_path_get(struct btree_trans *trans,
+			       enum btree_id btree_id, struct bpos pos,
+			       unsigned locks_want, unsigned level,
+			       unsigned flags, unsigned long ip)
 {
 	struct btree_path *path, *path_pos = NULL;
 	bool cached = flags & BTREE_ITER_CACHED;
 	if (locks_want > path->locks_want)
 		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
-	return path;
+	return path->idx;
 }
 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
 		return bkey_s_c_null;
 	if (!iter->key_cache_path)
-		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
+		iter->key_cache_path = trans->paths + bch2_path_get(trans, iter->btree_id, pos,
 						     iter->flags & BTREE_ITER_INTENT, 0,
 						     iter->flags|BTREE_ITER_CACHED|
 						     BTREE_ITER_CACHED_NOFILL,
 int __must_check bch2_btree_path_traverse(struct btree_trans *,
 					  struct btree_path *, unsigned);
-struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
-				 unsigned, unsigned, unsigned, unsigned long);
+btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
+			       unsigned, unsigned, unsigned, unsigned long);
 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
 /*
 #ifdef CONFIG_BCACHEFS_DEBUG
 	iter->ip_allocated = ip;
 #endif
-	iter->path = bch2_path_get(trans, btree_id, iter->pos,
+	iter->path = trans->paths + bch2_path_get(trans, btree_id, iter->pos,
 				   locks_want, depth, flags, ip);
 }
 #define TRACK_PATH_ALLOCATED
 #endif
+typedef u16 btree_path_idx_t;
+
 struct btree_path {
 	u8			idx;
 	u8			sorted_idx;
 					    enum btree_update_flags flags,
 					    unsigned long ip)
 {
-	struct btree_path *btree_path;
 	struct bkey k;
 	int ret;
-	btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
-				   BTREE_ITER_INTENT, _THIS_IP_);
-	ret = bch2_btree_path_traverse(trans, btree_path, 0);
+	btree_path_idx_t path_idx =
+		bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+			      BTREE_ITER_INTENT, _THIS_IP_);
+	ret = bch2_btree_path_traverse(trans, trans->paths + path_idx, 0);
 	if (ret)
 		goto out;
+	struct btree_path *btree_path = trans->paths + path_idx;
+
 	/*
 	 * The old key in the insert entry might actually refer to an existing
 	 * key in the btree that has been deleted from cache and not yet
 	int ret;
 	if (!iter->key_cache_path)
-		iter->key_cache_path =
+		iter->key_cache_path = trans->paths +
 			bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
 				      BTREE_ITER_INTENT|
 				      BTREE_ITER_CACHED, _THIS_IP_);
 {
 	struct btree_path *path;
-	path = bch2_path_get(trans, btree_id, pos, level + 1, level,
+	path = trans->paths + bch2_path_get(trans, btree_id, pos, level + 1, level,
 			     BTREE_ITER_NOPRESERVE|
 			     BTREE_ITER_INTENT, _RET_IP_);
 	path = bch2_btree_path_make_mut(trans, path, true, _RET_IP_);
 		? bpos_predecessor(b->data->min_key)
 		: bpos_successor(b->data->max_key);
-	sib_path = bch2_path_get(trans, path->btree_id, sib_pos,
+	sib_path = trans->paths + bch2_path_get(trans, path->btree_id, sib_pos,
 				 U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
 	ret = bch2_btree_path_traverse(trans, sib_path, false);
 	if (ret)
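
Note on the conversion pattern above: bch2_path_get() now hands back a u16 index into trans->paths instead of a struct btree_path pointer, and callers that still need a pointer recompute it on demand as trans->paths + idx. The patch itself does not state the motivation, but the usual reason for this kind of change is that an index stays valid even if the backing array is later grown or reallocated, whereas a cached pointer would dangle. The standalone sketch below illustrates that property under that assumption; every name in it (struct path_table, table_get(), path_idx_t) is hypothetical and not part of the bcachefs API.

#include <stdint.h>
#include <stdlib.h>

typedef uint16_t path_idx_t;			/* analogous to btree_path_idx_t */

struct path_table {
	struct { int dummy; } *paths;		/* backing array; may move when grown */
	size_t nr, capacity;
};

/* Allocate a slot and return its index.  Callers derive a pointer only at
 * the point of use, e.g. &tbl->paths[idx], mirroring trans->paths + idx. */
static path_idx_t table_get(struct path_table *tbl)
{
	if (tbl->nr == tbl->capacity) {
		size_t new_cap = tbl->capacity ? tbl->capacity * 2 : 8;
		void *p = realloc(tbl->paths, new_cap * sizeof(tbl->paths[0]));

		if (!p)
			abort();		/* error handling elided in this sketch */
		/* realloc() may have moved the array: any pointer saved before
		 * this call is now dangling, but saved indices remain valid. */
		tbl->paths = p;
		tbl->capacity = new_cap;
	}
	return (path_idx_t)tbl->nr++;
}

This reading is also consistent with the hunk above that re-derives "struct btree_path *btree_path = trans->paths + path_idx;" only after bch2_btree_path_traverse() has run, rather than holding a pointer across the call.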