bcachefs: Ensure bch2_btree_iter_next() always advances
Author: Kent Overstreet <kent.overstreet@gmail.com>
Wed, 15 May 2019 13:53:27 +0000 (09:53 -0400)
Committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:22 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h

index 5995e8e6c0d78931a677a2f571b33d19f39c5618..cbf9281e195bcd7cff9062bd855b846f45648624 100644 (file)
@@ -1110,7 +1110,8 @@ int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
 {
        int ret;
 
-       ret = __bch2_btree_iter_traverse(iter);
+       ret =   bch2_trans_cond_resched(iter->trans) ?:
+               __bch2_btree_iter_traverse(iter);
        if (unlikely(ret))
                ret = __btree_iter_traverse_all(iter->trans, iter, ret);
 
@@ -1302,9 +1303,11 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
                return btree_iter_peek_uptodate(iter);
 
        while (1) {
-               ret = bch2_btree_iter_traverse(iter);
-               if (unlikely(ret))
-                       return bkey_s_c_err(ret);
+               if (iter->uptodate >= BTREE_ITER_NEED_RELOCK) {
+                       ret = bch2_btree_iter_traverse(iter);
+                       if (unlikely(ret))
+                               return bkey_s_c_err(ret);
+               }
 
                k = __btree_iter_peek(iter, l);
                if (likely(k.k))
@@ -1356,10 +1359,17 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 
        bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
 
+       iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
+
        if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
-               k = bch2_btree_iter_peek(iter);
-               if (IS_ERR_OR_NULL(k.k))
-                       return k;
+               /*
+                * XXX: when we just need to relock we should be able to avoid
+                * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
+                * for that to work
+                */
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+
+               return bch2_btree_iter_peek(iter);
        }
 
        do {
@@ -1559,9 +1569,11 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
        if (iter->uptodate == BTREE_ITER_UPTODATE)
                return btree_iter_peek_uptodate(iter);
 
-       ret = bch2_btree_iter_traverse(iter);
-       if (unlikely(ret))
-               return bkey_s_c_err(ret);
+       if (iter->uptodate >= BTREE_ITER_NEED_RELOCK) {
+               ret = bch2_btree_iter_traverse(iter);
+               if (unlikely(ret))
+                       return bkey_s_c_err(ret);
+       }
 
        return __bch2_btree_iter_peek_slot(iter);
 }
index 18100722ccfd89a77a538f41fc5bcddbd13bbecc..ee2cea2b0b44679be60da9e8c5cdabecb87466a6 100644 (file)
@@ -195,13 +195,14 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
  * Unlocks before scheduling
  * Note: does not revalidate iterator
  */
-static inline void bch2_trans_cond_resched(struct btree_trans *trans)
+static inline int bch2_trans_cond_resched(struct btree_trans *trans)
 {
-       if (need_resched()) {
+       if (need_resched() || race_fault()) {
                bch2_trans_unlock(trans);
                schedule();
-       } else if (race_fault()) {
-               bch2_trans_unlock(trans);
+               return bch2_trans_relock(trans) ? 0 : -EINTR;
+       } else {
+               return 0;
        }
 }
 
@@ -229,8 +230,6 @@ static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
 static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
                                                     unsigned flags)
 {
-       bch2_trans_cond_resched(iter->trans);
-
        return flags & BTREE_ITER_SLOTS
                ? bch2_btree_iter_next_slot(iter)
                : bch2_btree_iter_next(iter);