return __extent_insert_advance_pos(s, next_pos, k);
 }
 
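+/*
+ * Trim an extent so that it fits entirely within the leaf node the iterator
+ * currently points to, so the insert can be done atomically within a single
+ * btree node:
+ */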
+void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+{
+       struct btree *b = iter->l[0].b;
+
+       BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
+
+       bch2_cut_back(b->key.k.p, &k->k);
+
+       BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
+}
+
 enum btree_insert_ret
 bch2_extent_can_insert(struct btree_insert *trans,
                       struct btree_insert_entry *insert,
        struct bkey_s_c k;
        int sectors;
 
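+       /*
+        * Atomic transactions require the caller to have already trimmed the
+        * extent to fit within this leaf node:
+        */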
+       BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
+              !bch2_extent_is_atomic(&insert->k->k, insert->iter));
+
        /*
         * We avoid creating whiteouts whenever possible when deleting, but
         * those optimizations mean we may potentially insert two whiteouts
 
                         struct bch_devs_mask *,
                         struct extent_pick_ptr *);
 
+void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
+
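+/*
+ * An extent is "atomic" if it lies entirely within the leaf node the iterator
+ * points to, i.e. it can be inserted in a single btree update:
+ */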
+static inline bool bch2_extent_is_atomic(struct bkey *k,
+                                        struct btree_iter *iter)
+{
+       struct btree *b = iter->l[0].b;
+
+       return bkey_cmp(k->p, b->key.k.p) <= 0 &&
+               bkey_cmp(bkey_start_pos(k), b->data->min_key) >= 0;
+}
+
 enum btree_insert_ret
 bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
                       unsigned *);
 
 #include "buckets.h"
 #include "clock.h"
 #include "error.h"
+#include "extents.h"
 #include "fs.h"
 #include "fs-io.h"
 #include "fsck.h"
        hook.need_inode_update  = false;
 
        do {
-               /* XXX: inode->i_size locking */
-               k = bch2_keylist_front(keys);
-               if (min(k->k.p.offset << 9, op->new_i_size) >
-                   op->inode->ei_inode.bi_size)
-                       hook.need_inode_update = true;
+               BKEY_PADDED(k) tmp;
 
-               /* optimization for fewer transaction restarts: */
                ret = bch2_btree_iter_traverse(extent_iter);
                if (ret)
                        goto err;
 
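+               /*
+                * Work on a copy of the front key, trimmed to the portion that
+                * can be inserted atomically; any remainder stays on the
+                * keylist for the next pass through the loop:
+                */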
+               bkey_copy(&tmp.k, bch2_keylist_front(keys));
+               k = &tmp.k;
+
+               bch2_extent_trim_atomic(k, extent_iter);
+
+               /* XXX: inode->i_size locking */
+               if (min(k->k.p.offset << 9, op->new_i_size) >
+                   op->inode->ei_inode.bi_size)
+                       hook.need_inode_update = true;
+
                if (hook.need_inode_update) {
                        struct bkey_s_c inode;
 
                if (hook.need_inode_update)
                        op->inode->ei_inode = hook.inode_u;
 
-               BUG_ON(bkey_cmp(extent_iter->pos, k->k.p) < 0);
-               bch2_keylist_pop_front(keys);
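+               /*
+                * If only part of the key was inserted (because it was
+                * trimmed), cut off the inserted portion and keep the rest on
+                * the keylist; otherwise we're done with this key:
+                */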
+               if (bkey_cmp(extent_iter->pos, bch2_keylist_front(keys)->k.p) < 0)
+                       bch2_cut_front(extent_iter->pos, bch2_keylist_front(keys));
+               else
+                       bch2_keylist_pop_front(keys);
        } while (!bch2_keylist_empty(keys));
 
        bch2_trans_exit(&trans);
                bch2_cut_front(src->pos, &copy.k);
                copy.k.k.p.offset -= len >> 9;
 
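+               /*
+                * Traverse the iterator so it points at the correct leaf node,
+                * then trim the copy so the update stays within that node:
+                */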
+               ret = bch2_btree_iter_traverse(dst);
+               if (ret)
+                       goto btree_iter_err;
+
+               bch2_extent_trim_atomic(&copy.k, dst);
+
                BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));
 
                ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,