struct bkey_s_c k;
- BUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
-
if (!path->cached) {
struct btree_path_level *l = path_l(path);
- struct bkey_packed *_k =
- bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ struct bkey_packed *_k;
+
+ EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
+ _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
} else {
struct bkey_cached *ck = (void *) path->l[0].b;
- EBUG_ON(path->btree_id != ck->key.btree_id ||
- bkey_cmp(path->pos, ck->key.pos));
+ EBUG_ON(ck &&
+ (path->btree_id != ck->key.btree_id ||
+ bkey_cmp(path->pos, ck->key.pos)));
- /* BTREE_ITER_CACHED_NOFILL? */
- if (unlikely(!ck->valid))
- goto hole;
+ /* No entry or not filled in yet (BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE)? */
+ if (unlikely(!ck || !ck->valid))
+ return bkey_s_c_null;
+
+ EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
*u = ck->k->k;
k = bkey_i_to_s_c(ck->k);
return k;
}
+/*
+ * Checks the btree key cache for a key at the given position and returns it
+ * if present, or bkey_s_c_null if there is none:
+ */
+static noinline
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bch_fs *c = trans->c;
+ struct bkey u;
+ int ret;
+
+ if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
+ return bkey_s_c_null;
+
+ if (!iter->key_cache_path)
+ iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
+ iter->flags & BTREE_ITER_INTENT, 0,
+ iter->flags|BTREE_ITER_CACHED);
+
+ iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
+ iter->flags & BTREE_ITER_INTENT);
+
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ iter->key_cache_path->should_be_locked = true;
+
+ return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
+}
+
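A minimal usage sketch, for illustration only (not part of the diff itself): this
mirrors how __bch2_btree_iter_peek() below consumes the helper, letting an
existing key cache entry override the key found in the btree; iter is assumed to
have BTREE_ITER_WITH_KEY_CACHE set and to be positioned already.

	struct bkey_s_c k2 = btree_trans_peek_key_cache(iter, iter->pos);

	if (bkey_err(k2))
		return k2;		/* traverse error or transaction restart */
	if (k2.k) {
		k = k2;			/* key cache entry takes precedence */
		iter->k = *k.k;
	}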
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
struct btree_trans *trans = iter->trans;
struct bkey_i *next_update;
- struct bkey_s_c k;
+ struct bkey_s_c k, k2;
int ret;
EBUG_ON(iter->path->cached || iter->path->level);
goto out;
}
+ iter->path->should_be_locked = true;
+
k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
+ if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ k.k &&
+ (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ ret = bkey_err(k2);
+ if (ret) {
+ k = k2;
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ goto out;
+ }
+
+ k = k2;
+ iter->k = *k.k;
+ }
+
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
k = btree_trans_peek_journal(trans, iter, k);
(k = btree_trans_peek_slot_journal(trans, iter)).k)
goto out;
+ if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ if (!bkey_err(k))
+ iter->k = *k.k;
+ goto out;
+ }
+
k = bch2_btree_path_peek_slot(iter->path, &iter->k);
} else {
struct bpos next;
if (iter->update_path)
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
+ if (iter->key_cache_path)
+ bch2_path_put(trans, iter->key_cache_path,
+ iter->flags & BTREE_ITER_INTENT);
iter->path = NULL;
iter->update_path = NULL;
+ iter->key_cache_path = NULL;
}
static void __bch2_trans_iter_init(struct btree_trans *trans,
if (trans->journal_replay_not_finished)
flags |= BTREE_ITER_WITH_JOURNAL;
- if (!btree_id_cached(trans->c, btree_id))
+ if (!btree_id_cached(trans->c, btree_id)) {
flags &= ~BTREE_ITER_CACHED;
+ flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+ } else if (!(flags & BTREE_ITER_CACHED))
+ flags |= BTREE_ITER_WITH_KEY_CACHE;
iter->trans = trans;
iter->path = NULL;
iter->update_path = NULL;
+ iter->key_cache_path = NULL;
iter->btree_id = btree_id;
iter->min_depth = depth;
iter->flags = flags;
__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
if (src->update_path)
__btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
+ dst->key_cache_path = NULL;
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
struct btree_path *ck_path,
struct bkey_cached *ck)
{
- struct btree_iter iter;
+ struct btree_path *path;
struct bkey_s_c k;
unsigned new_u64s = 0;
struct bkey_i *new_k = NULL;
+ struct bkey u;
int ret;
- bch2_trans_iter_init(trans, &iter, ck->key.btree_id,
- ck->key.pos, BTREE_ITER_SLOTS);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
+ path = bch2_path_get(trans, ck->key.btree_id, ck->key.pos, 0, 0, 0);
+ ret = bch2_btree_path_traverse(trans, path, 0);
if (ret)
goto err;
+ k = bch2_btree_path_peek_slot(path, &u);
+
if (!bch2_btree_node_relock(trans, ck_path, 0)) {
trace_trans_restart_relock_key_cache_fill(trans->fn,
_THIS_IP_, ck_path->btree_id, &ck_path->pos);
bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
- /* We're not likely to need this iterator again: */
+ /* We're not likely to need this path again: */
- set_btree_iter_dontneed(&iter);
+ path->preserve = false;
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_path_put(trans, path, 0);
return ret;
}
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_CACHED_NOCREATE|
BTREE_ITER_INTENT);
+ b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+
ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
goto out;
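
Net effect, sketched for illustration (trans, btree_id and pos here are
placeholders, not taken from the diff): a plain, non-cached iterator on a
key-cached btree now gets BTREE_ITER_WITH_KEY_CACHE set by
__bch2_trans_iter_init(), so an ordinary peek transparently returns the key
cache version of a key when one exists.

	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, btree_id, pos, BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(&iter);	/* may now come from the key cache */
	if (!bkey_err(k)) {
		/* use k */
	}
	bch2_trans_iter_exit(trans, &iter);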