bcachefs: bch2_btree_iter_relock_intent()
authorKent Overstreet <kent.overstreet@gmail.com>
Sat, 24 Jul 2021 21:12:51 +0000 (17:12 -0400)
committerKent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:09 +0000 (17:09 -0400)
This adds a new helper for btree_cache.c that does what we want where
the iterator is still being traversed - and also eliminates some
unnecessary transaction restarts.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_cache.h
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h

index e2c02ae98f83a6faafb49531373103eea685cf7b..6d5cf2a5a1590568bb83487aa46d689fc14cd4d8 100644 (file)
@@ -693,14 +693,9 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
        if (!sync)
                return NULL;
 
-       /*
-        * XXX: this will probably always fail because btree_iter_relock()
-        * currently fails for iterators that aren't pointed at a valid btree
-        * node
-        */
        if (iter &&
            (!bch2_trans_relock(iter->trans) ||
-            !bch2_btree_iter_relock(iter, _THIS_IP_)))
+            !bch2_btree_iter_relock_intent(iter)))
                return ERR_PTR(-EINTR);
 
        if (!six_relock_type(&b->c.lock, lock_type, seq))
@@ -760,11 +755,12 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
  * The btree node will have either a read or a write lock held, depending on
  * the @write parameter.
  */
-struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *iter,
                                  const struct bkey_i *k, unsigned level,
                                  enum six_lock_type lock_type,
                                  unsigned long trace_ip)
 {
+       struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
        struct bset_tree *t;
@@ -838,7 +834,7 @@ lock_node:
                        if (bch2_btree_node_relock(iter, level + 1))
                                goto retry;
 
-                       trace_trans_restart_btree_node_reused(iter->trans->ip,
+                       trace_trans_restart_btree_node_reused(trans->ip,
                                                              trace_ip,
                                                              iter->btree_id,
                                                              &iter->real_pos);
@@ -850,18 +846,17 @@ lock_node:
                u32 seq = b->c.lock.state.seq;
 
                six_unlock_type(&b->c.lock, lock_type);
-               bch2_trans_unlock(iter->trans);
+               bch2_trans_unlock(trans);
 
                bch2_btree_node_wait_on_read(b);
 
                /*
-                * XXX: check if this always fails - btree_iter_relock()
-                * currently fails for iterators that aren't pointed at a valid
-                * btree node
+                * should_be_locked is not set on this iterator yet, so we need
+                * to relock it specifically:
                 */
                if (iter &&
-                   (!bch2_trans_relock(iter->trans) ||
-                    !bch2_btree_iter_relock(iter, _THIS_IP_)))
+                   (!bch2_trans_relock(trans) ||
+                    !bch2_btree_iter_relock_intent(iter)))
                        return ERR_PTR(-EINTR);
 
                if (!six_relock_type(&b->c.lock, lock_type, seq))
index 40dd263a7caa7b043ab75792cb30a94660399572..6e9f08597d946e420dfa570417f0bf005f690043 100644 (file)
@@ -20,7 +20,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
 struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
 
-struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *,
+struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
                                  const struct bkey_i *, unsigned,
                                  enum six_lock_type, unsigned long);
 
index 745bf48241fd5b61bbce3504a807e3172b8eefd3..5c3404699136da368365023dba225bc10ac3d0db 100644 (file)
@@ -205,7 +205,6 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
                                        is_btree_node(iter, l)
                                        ? iter->l[l].b->c.lock.state.seq
                                        : 0);
-
                        fail_idx = l;
                        btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
                }
@@ -382,6 +381,34 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
 static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif
 
+/*
+ * Only for btree_cache.c - only relocks intent locks
+ */
+bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
+{
+       unsigned l;
+
+       for (l = iter->level;
+            l < iter->locks_want && btree_iter_node(iter, l);
+            l++) {
+               if (!bch2_btree_node_relock(iter, l)) {
+                       trace_node_relock_fail(iter->trans->ip, _RET_IP_,
+                                       iter->btree_id, &iter->real_pos,
+                                       l, iter->l[l].lock_seq,
+                                       is_btree_node(iter, l)
+                                       ? 0
+                                       : (unsigned long) iter->l[l].b,
+                                       is_btree_node(iter, l)
+                                       ? iter->l[l].b->c.lock.state.seq
+                                       : 0);
+                       btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 __flatten
 bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
 {
@@ -1172,7 +1199,8 @@ static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
 static __always_inline int btree_iter_down(struct btree_iter *iter,
                                           unsigned long trace_ip)
 {
-       struct bch_fs *c = iter->trans->c;
+       struct btree_trans *trans  = iter->trans;
+       struct bch_fs *c = trans->c;
        struct btree_iter_level *l = &iter->l[iter->level];
        struct btree *b;
        unsigned level = iter->level - 1;
@@ -1186,7 +1214,7 @@ static __always_inline int btree_iter_down(struct btree_iter *iter,
        bch2_bkey_buf_unpack(&tmp, c, l->b,
                         bch2_btree_node_iter_peek(&l->iter, l->b));
 
-       b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
+       b = bch2_btree_node_get(trans, iter, tmp.k, level, lock_type, trace_ip);
        ret = PTR_ERR_OR_ZERO(b);
        if (unlikely(ret))
                goto err;
index 7385cca43f8b0eb30b84b784d24ebb186e017384..3889683e16f8223c4aefe351ba2598fa44dbadc8 100644 (file)
@@ -111,6 +111,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
                              struct btree_node_iter *, struct bkey_packed *,
                              unsigned, unsigned);
 
+bool bch2_btree_iter_relock_intent(struct btree_iter *);
 bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
 
 bool bch2_trans_relock(struct btree_trans *);