if (!sync)
return NULL;
- /*
- * XXX: this will probably always fail because btree_iter_relock()
- * currently fails for iterators that aren't pointed at a valid btree
- * node
- */
if (iter &&
(!bch2_trans_relock(iter->trans) ||
- !bch2_btree_iter_relock(iter, _THIS_IP_)))
+ !bch2_btree_iter_relock_intent(iter)))
return ERR_PTR(-EINTR);
if (!six_relock_type(&b->c.lock, lock_type, seq))
* The btree node will have either a read or a write lock held, depending on
* the @lock_type parameter.
*/
-struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *iter,
const struct bkey_i *k, unsigned level,
enum six_lock_type lock_type,
unsigned long trace_ip)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
if (bch2_btree_node_relock(iter, level + 1))
goto retry;
- trace_trans_restart_btree_node_reused(iter->trans->ip,
+ trace_trans_restart_btree_node_reused(trans->ip,
trace_ip,
iter->btree_id,
&iter->real_pos);
u32 seq = b->c.lock.state.seq;
six_unlock_type(&b->c.lock, lock_type);
- bch2_trans_unlock(iter->trans);
+ bch2_trans_unlock(trans);
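+ /* don't hold any locks while we wait for the node read to complete: */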
bch2_btree_node_wait_on_read(b);
/*
- * XXX: check if this always fails - btree_iter_relock()
- * currently fails for iterators that aren't pointed at a valid
- * btree node
+ * should_be_locked is not set on this iterator yet, so we need
+ * to relock it specifically:
*/
if (iter &&
- (!bch2_trans_relock(iter->trans) ||
- !bch2_btree_iter_relock(iter, _THIS_IP_)))
+ (!bch2_trans_relock(trans) ||
+ !bch2_btree_iter_relock_intent(iter)))
return ERR_PTR(-EINTR);
if (!six_relock_type(&b->c.lock, lock_type, seq))
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
-struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *,
+struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
const struct bkey_i *, unsigned,
enum six_lock_type, unsigned long);
is_btree_node(iter, l)
? iter->l[l].b->c.lock.state.seq
: 0);
-
fail_idx = l;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
+/*
+ * Only for btree_cache.c - only relocks intent locks
+ */
+bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
+{
+ unsigned l;
+
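+ /* relock only the levels that want intent locks, from the current level up to locks_want: */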
+ for (l = iter->level;
+ l < iter->locks_want && btree_iter_node(iter, l);
+ l++) {
+ if (!bch2_btree_node_relock(iter, l)) {
+ trace_node_relock_fail(iter->trans->ip, _RET_IP_,
+ iter->btree_id, &iter->real_pos,
+ l, iter->l[l].lock_seq,
+ is_btree_node(iter, l)
+ ? 0
+ : (unsigned long) iter->l[l].b,
+ is_btree_node(iter, l)
+ ? iter->l[l].b->c.lock.state.seq
+ : 0);
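+ /* relock failed: flag the iterator so the next lookup does a full traverse */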
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ return false;
+ }
+ }
+
+ return true;
+}
+
__flatten
bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
static __always_inline int btree_iter_down(struct btree_iter *iter,
unsigned long trace_ip)
{
- struct bch_fs *c = iter->trans->c;
+ struct btree_trans *trans = iter->trans;
+ struct bch_fs *c = trans->c;
struct btree_iter_level *l = &iter->l[iter->level];
struct btree *b;
unsigned level = iter->level - 1;
bch2_bkey_buf_unpack(&tmp, c, l->b,
bch2_btree_node_iter_peek(&l->iter, l->b));
- b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
+ b = bch2_btree_node_get(trans, iter, tmp.k, level, lock_type, trace_ip);
ret = PTR_ERR_OR_ZERO(b);
if (unlikely(ret))
goto err;
struct btree_node_iter *, struct bkey_packed *,
unsigned, unsigned);
+bool bch2_btree_iter_relock_intent(struct btree_iter *);
bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
bool bch2_trans_relock(struct btree_trans *);