        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
 
+       BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
        /*
         * Parent node must be locked, else we could read in a btree node that's
         * been freed:
         */
-       BUG_ON(!btree_node_locked(iter, level + 1));
-       BUG_ON(level >= BTREE_MAX_DEPTH);
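+       /*
+        * The caller is now allowed to drop its lock on the parent before
+        * calling us (and potentially block): relock here, and return -EINTR
+        * if that fails so the traversal can be retried:
+        */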
+       if (!bch2_btree_node_relock(iter, level + 1))
+               return ERR_PTR(-EINTR);
 
        b = bch2_btree_node_mem_alloc(c);
        if (IS_ERR(b))
                return b;
        }
 
        /*
-        * If the btree node wasn't cached, we can't drop our lock on
-        * the parent until after it's added to the cache - because
-        * otherwise we could race with a btree_split() freeing the node
-        * we're trying to lock.
+        * Unlock before doing IO:
         *
-        * But the deadlock described below doesn't exist in this case,
-        * so it's safe to not drop the parent lock until here:
+        * XXX: ideally should be dropping all btree node locks here
         */
        if (btree_node_read_locked(iter, level + 1))
                btree_node_unlock(iter, level + 1);
        struct btree *b;
        struct bset_tree *t;
 
-       /*
-        * XXX: locking optimization
-        *
-        * we can make the locking looser here - caller can drop lock on parent
-        * node before locking child node (and potentially blocking): we just
-        * have to have bch2_btree_node_fill() call relock on the parent and
-        * return -EINTR if that fails
-        */
-       EBUG_ON(!btree_node_locked(iter, level + 1));
        EBUG_ON(level >= BTREE_MAX_DEPTH);
+
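+       /*
+        * If the node's memory address is cached in the key (btree_ptr_v2
+        * ->mem_ptr), skip the btree cache lookup:
+        */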
+       b = btree_node_mem_ptr(k);
+       if (b)
+               goto lock_node;
 retry:
        b = btree_cache_find(bc, k);
        if (unlikely(!b)) {
                if (IS_ERR(b))
                        return b;
        } else {
+lock_node:
                /*
                 * There's a potential deadlock with splits and insertions into
                 * interior nodes we have to avoid:
                }
        }
 
+       /* XXX: waiting on IO with btree locks held: */
        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
 
 
                btree_node_unlock(iter, iter->level);
 }
 
+static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
+                                           unsigned plevel, struct btree *b)
+{
+       struct btree_iter_level *l = &iter->l[plevel];
+       bool locked = btree_node_locked(iter, plevel);
+       struct bkey_packed *k;
+       struct bch_btree_ptr_v2 *bp;
+
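+       /*
+        * Cache b's address in the parent's btree_ptr_v2 key, so future
+        * lookups can skip the btree cache. This is only an optimization -
+        * if we can't relock the parent, just skip it:
+        */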
+       if (!bch2_btree_node_relock(iter, plevel))
+               return;
+
+       k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+       BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+
+       bp = (void *) bkeyp_val(&l->b->format, k);
+       bp->mem_ptr = (unsigned long)b;
+
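+       /* If the parent wasn't already locked, drop the lock we just took: */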
+       if (!locked)
+               btree_node_unlock(iter, plevel);
+}
+
 static __always_inline int btree_iter_down(struct btree_iter *iter)
 {
        struct bch_fs *c = iter->trans->c;
        mark_btree_node_locked(iter, level, lock_type);
        btree_iter_node_set(iter, b);
 
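+       /*
+        * If the key's cached mem_ptr is missing or stale, point it at the
+        * node we just locked:
+        */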
+       if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
+           unlikely(b != btree_node_mem_ptr(&tmp.k)))
+               btree_node_mem_ptr_set(iter, level + 1, b);
+
        if (iter->flags & BTREE_ITER_PREFETCH)
                btree_iter_prefetch(iter);