        return bbio->inode && is_data_inode(&bbio->inode->vfs_inode);
 }
 
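+/* Only data write bbios carry a reference to their covering ordered extent. */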
+static bool bbio_has_ordered_extent(struct btrfs_bio *bbio)
+{
+       return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;
+}
+
 /*
  * Initialize a btrfs_bio structure.  This skips the embedded bio itself as it
  * is already initialized by the block layer.
        bbio->inode = orig_bbio->inode;
        bbio->file_offset = orig_bbio->file_offset;
        orig_bbio->file_offset += map_length;
-
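+       /* The split-off bbio needs its own reference on the ordered extent. */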
+       if (bbio_has_ordered_extent(bbio)) {
+               refcount_inc(&orig_bbio->ordered->refs);
+               bbio->ordered = orig_bbio->ordered;
+       }
        atomic_inc(&orig_bbio->pending_ios);
        return bbio;
 }
 
+/* Free a bio that was never submitted to the underlying device. */
+static void btrfs_cleanup_bio(struct btrfs_bio *bbio)
+{
+       if (bbio_has_ordered_extent(bbio))
+               btrfs_put_ordered_extent(bbio->ordered);
+       bio_put(&bbio->bio);
+}
+
+static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
+{
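+       /*
+        * bbio->end_io() may free the bbio, so save the ordered extent
+        * pointer before the call and drop its reference afterwards.
+        */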
+       if (bbio_has_ordered_extent(bbio)) {
+               struct btrfs_ordered_extent *ordered = bbio->ordered;
+
+               bbio->end_io(bbio);
+               btrfs_put_ordered_extent(ordered);
+       } else {
+               bbio->end_io(bbio);
+       }
+}
+
+void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
+{
+       bbio->bio.bi_status = status;
+       __btrfs_bio_end_io(bbio);
+}
+
 static void btrfs_orig_write_end_io(struct bio *bio);
 
 static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
 
                if (bbio->bio.bi_status)
                        btrfs_bbio_propagate_error(bbio, orig_bbio);
-               bio_put(&bbio->bio);
+               btrfs_cleanup_bio(bbio);
                bbio = orig_bbio;
        }
 
        if (atomic_dec_and_test(&bbio->pending_ios))
-               bbio->end_io(bbio);
+               __btrfs_bio_end_io(bbio);
 }
 
 static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
 
 fail_put_bio:
        if (map_length < length)
-               bio_put(bio);
+               btrfs_cleanup_bio(bbio);
 fail:
        btrfs_bio_counter_dec(fs_info);
        btrfs_bio_end_io(orig_bbio, ret);
 
 
                /*
                 * For data writes:
+                * - ordered extent covering the bio
                 * - pointer to the checksums for this bio
                 * - original physical address from the allocator
                 *   (for zone append only)
                 */
                struct {
+                       struct btrfs_ordered_extent *ordered;
                        struct btrfs_ordered_sum *sums;
                        u64 orig_physical;
                };
 struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
                                  struct btrfs_fs_info *fs_info,
                                  btrfs_bio_end_io_t end_io, void *private);
-
-static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
-{
-       bbio->bio.bi_status = status;
-       bbio->end_io(bbio);
-}
+void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status);
 
 /* Submit using blkcg_punt_bio_submit. */
 #define REQ_BTRFS_CGROUP_PUNT                  REQ_FS_PRIVATE
 
        INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
        cb->nr_pages = nr_pages;
        cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
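+       /* The bbio now owns the ordered extent reference. */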
+       cb->bbio.ordered = ordered;
        btrfs_add_compressed_bio_pages(cb);
 
        btrfs_submit_bio(&cb->bbio, 0);
-       btrfs_put_ordered_extent(ordered);
 }
 
 /*
 
                        bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
                                        ordered->file_offset +
                                        ordered->disk_num_bytes - file_offset);
-                       btrfs_put_ordered_extent(ordered);
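+                       /* The reference is now dropped at end I/O time instead. */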
+                       bbio->ordered = ordered;
                }
 
                /*
 
  */
 blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
 {
-       struct btrfs_ordered_extent *ordered =
-               btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset);
-
-       if (WARN_ON_ONCE(!ordered))
-               return BLK_STS_IOERR;
-
        bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS);
        if (!bbio->sums)
                return BLK_STS_RESOURCE;
        bbio->sums->len = bbio->bio.bi_iter.bi_size;
        bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
-       btrfs_add_ordered_sum(ordered, bbio->sums);
-       btrfs_put_ordered_extent(ordered);
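+       /* The covering ordered extent is already tracked in bbio->ordered. */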
+       btrfs_add_ordered_sum(bbio->ordered, bbio->sums);
        return 0;
 }
 
 
                return -EINVAL;
 
        /* No need to split if the ordered extent covers the entire bio. */
-       if (ordered->disk_num_bytes == len)
+       if (ordered->disk_num_bytes == len) {
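+               /* Take a reference for the bbio; it is dropped at end I/O time. */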
+               refcount_inc(&ordered->refs);
+               bbio->ordered = ordered;
                return 0;
+       }
 
        /*
         * Don't split the extent_map for NOCOW extents, as we're writing into
        new = btrfs_split_ordered_extent(ordered, len);
        if (IS_ERR(new))
                return PTR_ERR(new);
-       btrfs_put_ordered_extent(new);
-
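+       /* Hand the new ordered extent's reference over to the bbio. */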
+       bbio->ordered = new;
        return 0;
 }