unsigned, unsigned);
 
 int bch2_btree_iter_unlock(struct btree_iter *);
+bool bch2_btree_iter_relock(struct btree_iter *);
 
 bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
 bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);
 
                __bch2_btree_node_relock(iter, level);
 }
 
-bool bch2_btree_iter_relock(struct btree_iter *);
-
 void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
 
 void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
 
                              bool direct,
                              s64 *total_delta)
 {
-       struct btree_iter *inode_iter = NULL;
        struct bch_inode_unpacked inode_u;
        struct bkey_inode_buf inode_p;
        bool allocating = false;
        bool extended = false;
+       bool inode_locked = false;
        s64 i_sectors_delta;
        int ret;
 
        /* XXX: inode->i_size locking */
        if (i_sectors_delta ||
            new_i_size > inode->ei_inode.bi_size) {
-               inode_iter = bch2_trans_get_iter(trans,
-                       BTREE_ID_INODES,
-                       POS(k->k.p.inode, 0),
-                       BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-               if (IS_ERR(inode_iter))
-                       return PTR_ERR(inode_iter);
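+               /*
+                * We can't take ei_update_lock while holding btree node
+                * locks, so drop them first, take the mutex, then retake the
+                * btree locks - bailing out with -EINTR (transaction
+                * restart) if the relock fails:
+                */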
+               bch2_btree_iter_unlock(extent_iter);
+               mutex_lock(&inode->ei_update_lock);
 
-               ret = bch2_btree_iter_traverse(inode_iter);
-               if (ret)
-                       goto err;
+               if (!bch2_btree_iter_relock(extent_iter)) {
+                       mutex_unlock(&inode->ei_update_lock);
+                       return -EINTR;
+               }
+
+               inode_locked = true;
+
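+               /*
+                * Lazily allocate a deferred update for this inode: inode
+                * updates can then be journalled without having to update
+                * the btree key in place on every commit:
+                */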
+               if (!inode->ei_inode_update)
+                       inode->ei_inode_update =
+                               bch2_deferred_update_alloc(trans->c,
+                                                       BTREE_ID_INODES, 64);
 
                inode_u = inode->ei_inode;
                inode_u.bi_sectors += i_sectors_delta;
 
                bch2_inode_pack(&inode_p, &inode_u);
                bch2_trans_update(trans,
-                       BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
+                       BTREE_INSERT_DEFERRED(inode->ei_inode_update,
+                                             &inode_p.inode.k_i));
        }
 
        ret = bch2_trans_commit(trans, disk_res,
        if (total_delta)
                *total_delta += i_sectors_delta;
 err:
-       if (!IS_ERR_OR_NULL(inode_iter))
-               bch2_trans_iter_put(trans, inode_iter);
+       if (inode_locked)
+               mutex_unlock(&inode->ei_update_lock);
+
        return ret;
 }
 
 static int bchfs_write_index_update(struct bch_write_op *wop)
 {
+       struct bch_fs *c = wop->c;
        struct bchfs_write_op *op = container_of(wop,
                                struct bchfs_write_op, op);
        struct quota_res *quota_res = op->is_dio
 
        BUG_ON(k->k.p.inode != inode->v.i_ino);
 
-       bch2_trans_init(&trans, wop->c);
+       bch2_trans_init(&trans, c);
        bch2_trans_preload_iters(&trans);
 
        iter = bch2_trans_get_iter(&trans,
 
                                inode_set_fn set,
                                void *p)
 {
+       struct bch_fs *c = trans->c;
-       struct btree_iter *iter;
+       struct btree_iter *iter = NULL;
        struct bkey_inode_buf *inode_p;
        int ret;
 
        lockdep_assert_held(&inode->ei_update_lock);
 
+       /* XXX: Don't do this with btree locks held */
+       if (!inode->ei_inode_update)
+               inode->ei_inode_update =
+                       bch2_deferred_update_alloc(c, BTREE_ID_INODES, 64);
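+       /*
+        * The old iterator-based inode update path, disabled while
+        * switching to deferred updates:
+        */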
+#if 0
        iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
                        POS(inode->v.i_ino, 0),
                        BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;
-
+#endif
        *inode_u = inode->ei_inode;
 
        if (set) {
                return PTR_ERR(inode_p);
 
        bch2_inode_pack(inode_p, inode_u);
-       bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &inode_p->inode.k_i));
+
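+       /*
+        * Fall back to updating via the btree iterator if the deferred
+        * update couldn't be allocated - note that this path requires the
+        * iterator setup above, currently under #if 0:
+        */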
+       if (!inode->ei_inode_update)
+               bch2_trans_update(trans,
+                       BTREE_INSERT_ENTRY(iter, &inode_p->inode.k_i));
+       else
+               bch2_trans_update(trans,
+                       BTREE_INSERT_DEFERRED(inode->ei_inode_update,
+                                             &inode_p->inode.k_i));
+
        return 0;
 }
 
        mutex_init(&inode->ei_update_lock);
        pagecache_lock_init(&inode->ei_pagecache_lock);
        mutex_init(&inode->ei_quota_lock);
+       inode->ei_inode_update = NULL;
        inode->ei_journal_seq = 0;
 
        return &inode->v;
 
        BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
 
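+       /* Free the inode's deferred update, if one was ever allocated: */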
+       if (inode->ei_inode_update)
+               bch2_deferred_update_free(c, inode->ei_inode_update);
+       inode->ei_inode_update = NULL;
+
        if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
                bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
                                KEY_TYPE_QUOTA_WARN);
 
        struct inode            v;
 
        struct mutex            ei_update_lock;
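+       /* deferred update used for this inode's btree key updates: */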
+       struct deferred_update  *ei_inode_update;
        u64                     ei_journal_seq;
        u64                     ei_quota_reserved;
        unsigned long           ei_last_dirtied;