: bch2_btree_iter_next(iter);
 }
 
+/*
+ * Extract the error from a btree lookup result: k.k is either a valid
+ * key pointer, NULL, or an ERR_PTR.  Returns the encoded error code,
+ * or 0 when k.k is a valid key or NULL.
+ */
+static inline int bkey_err(struct bkey_s_c k)
+{
+       return PTR_ERR_OR_ZERO(k.k);
+}
+
 #define for_each_btree_key(_trans, _iter, _btree_id,                   \
                           _start, _flags, _k, _ret)                    \
        for ((_ret) = PTR_ERR_OR_ZERO((_iter) =                         \
             (_ret) = PTR_ERR_OR_ZERO(((_k) =                           \
                        __bch2_btree_iter_next(_iter, _flags)).k))
 
-#define for_each_btree_key_continue(_iter, _flags, _k)                 \
+/*
+ * Iterate keys starting from @_iter's current position.  @_ret receives
+ * the error from each lookup (0 on success); the loop terminates either
+ * on a lookup error or when no further key is returned, so callers must
+ * check @_ret after the loop instead of IS_ERR() on the key itself.
+ */
+#define for_each_btree_key_continue(_iter, _flags, _k, _ret)           \
        for ((_k) = __bch2_btree_iter_peek(_iter, _flags);              \
-            !IS_ERR_OR_NULL((_k).k);                                   \
+            !((_ret) = bkey_err(_k)) && (_k).k;                        \
             (_k) = __bch2_btree_iter_next(_iter, _flags))
 
-static inline int bkey_err(struct bkey_s_c k)
-{
-       return PTR_ERR_OR_ZERO(k.k);
-}
-
 /* new multiple iterator interface: */
 
 int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
 
        iter = bch2_trans_copy_iter(trans, h->chain);
        BUG_ON(IS_ERR(iter));
 
-       for_each_btree_key_continue(iter, 0, k2) {
+       for_each_btree_key_continue(iter, 0, k2, ret) {
                if (bkey_cmp(k2.k->p, k.k->p) >= 0)
                        break;
 
        iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
                                   POS(BCACHEFS_ROOT_INO, 0), 0);
 retry:
-       for_each_btree_key_continue(iter, 0, k) {
+       for_each_btree_key_continue(iter, 0, k, ret) {
                ret = walk_inode(&trans, &w, k.k->p.inode);
                if (ret)
                        break;
        iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
                                   POS(BCACHEFS_ROOT_INO, 0), 0);
 retry:
-       for_each_btree_key_continue(iter, 0, k) {
+       for_each_btree_key_continue(iter, 0, k, ret) {
                struct bkey_s_c_dirent d;
                struct bch_inode_unpacked target;
                bool have_target;
        iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
                                   POS(BCACHEFS_ROOT_INO, 0), 0);
 retry:
-       for_each_btree_key_continue(iter, 0, k) {
+       for_each_btree_key_continue(iter, 0, k, ret) {
                ret = walk_inode(&trans, &w, k.k->p.inode);
                if (ret)
                        break;
 
        iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS_MIN, 0);
 retry:
-       for_each_btree_key_continue(iter, 0, k) {
+       for_each_btree_key_continue(iter, 0, k, ret) {
                if (k.k->type != KEY_TYPE_inode)
                        continue;
 
                        had_unreachable = true;
                }
        }
-       ret = bch2_trans_iter_free(&trans, iter);
+       bch2_trans_iter_free(&trans, iter);
        if (ret)
                goto err;
 
 
 {
        struct btree_iter *iter;
        struct bkey_s_c k;
+       int ret;
 
        iter = bch2_trans_copy_iter(trans, start);
        if (IS_ERR(iter))
 
        bch2_btree_iter_next_slot(iter);
 
-       for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+       for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
                if (k.k->type != desc.key_type &&
                    k.k->type != KEY_TYPE_whiteout)
                        break;
                }
        }
 
-       return bch2_trans_iter_free(trans, iter);
+       bch2_trans_iter_free(trans, iter);
+       return ret;
 }
 
 static __always_inline