         zi->i_flags &= ~ZONEFS_ZONE_OPEN;
 }
 
-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
-                             unsigned int flags, struct iomap *iomap,
-                             struct iomap *srcmap)
+static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+                                  loff_t length, unsigned int flags,
+                                  struct iomap *iomap, struct iomap *srcmap)
 {
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
        struct super_block *sb = inode->i_sb;
        loff_t isize;
 
-       /* All I/Os should always be within the file maximum size */
+       /*
+        * All blocks are always mapped below EOF. If reading past EOF,
+        * act as if there is a hole up to the file maximum size.
+        */
+       mutex_lock(&zi->i_truncate_mutex);
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+       isize = i_size_read(inode);
+       if (iomap->offset >= isize) {
+               iomap->type = IOMAP_HOLE;
+               iomap->addr = IOMAP_NULL_ADDR;
+               iomap->length = length;
+       } else {
+               iomap->type = IOMAP_MAPPED;
+               iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+               iomap->length = isize - iomap->offset;
+       }
+       mutex_unlock(&zi->i_truncate_mutex);
+
+       trace_zonefs_iomap_begin(inode, iomap);
+
+       return 0;
+}
+
+static const struct iomap_ops zonefs_read_iomap_ops = {
+       .iomap_begin    = zonefs_read_iomap_begin,
+};
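
As an aside, the read-path mapping can be modelled in plain user-space C. The sketch below uses hypothetical example values for the zone start sector, block size and file size; it mirrors the ALIGN_DOWN() rounding and the sector-to-byte conversion above, reporting a hole of the requested length for any block-aligned offset at or beyond EOF and a mapped extent up to EOF otherwise.

/* User-space model of the read-path mapping; all values are examples. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t zsector   = 524288;   /* zone start sector (like zi->i_zsector) */
	uint64_t blocksize = 4096;     /* filesystem block size */
	uint64_t isize     = 1048576;  /* current inode size */
	uint64_t offset    = 1048576;  /* read offset exactly at EOF */
	uint64_t length    = 131072;   /* requested read length */

	uint64_t map_off = ALIGN_DOWN(offset, blocksize);

	if (map_off >= isize) {
		/* Past EOF: report a hole covering the whole request. */
		printf("hole: offset=%llu length=%llu\n",
		       (unsigned long long)map_off,
		       (unsigned long long)length);
	} else {
		/* Below EOF: mapped, device address = zone start + offset. */
		uint64_t addr = (zsector << SECTOR_SHIFT) + map_off;

		printf("mapped: offset=%llu addr=%llu length=%llu\n",
		       (unsigned long long)map_off,
		       (unsigned long long)addr,
		       (unsigned long long)(isize - map_off));
	}
	return 0;
}

With these values, a read starting exactly at EOF yields a hole of the requested length, rather than the zero-length extent the removed shared helper would have computed from min(length, isize - offset).
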
+
+static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+                                   loff_t length, unsigned int flags,
+                                   struct iomap *iomap, struct iomap *srcmap)
+{
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       struct super_block *sb = inode->i_sb;
+       loff_t isize;
+
+       /* All write I/Os should always be within the file maximum size */
        if (WARN_ON_ONCE(offset + length > zi->i_max_size))
                return -EIO;
 
         * operation.
         */
        if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
-                        (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
+                        !(flags & IOMAP_DIRECT)))
                return -EIO;
 
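
A small aside on the simplified sanity check above: since this helper now only serves writes, the IOMAP_WRITE test became redundant and only a missing IOMAP_DIRECT flag has to be caught for sequential zone files. A rough user-space equivalent (the flag values are assumptions for illustration, not taken from iomap.h):

#include <stdbool.h>
#include <stdio.h>

/* Flag values are assumptions for illustration only. */
#define IOMAP_WRITE	(1 << 0)
#define IOMAP_DIRECT	(1 << 4)

/* Old shared helper: only buffered writes on sequential zones were invalid. */
static bool seq_invalid_old(unsigned int flags)
{
	return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
}

/* New write-only helper: IOMAP_WRITE is implied, so only the missing
 * IOMAP_DIRECT flag needs to be caught. */
static bool seq_invalid_new(unsigned int flags)
{
	return !(flags & IOMAP_DIRECT);
}

int main(void)
{
	unsigned int buffered_write = IOMAP_WRITE;

	printf("old=%d new=%d\n", seq_invalid_old(buffered_write),
	       seq_invalid_new(buffered_write));
	return 0;
}
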
        /*
         * write pointer) and unwritten beyond.
         */
        mutex_lock(&zi->i_truncate_mutex);
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+       iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
        isize = i_size_read(inode);
-       if (offset >= isize)
+       if (iomap->offset >= isize) {
                iomap->type = IOMAP_UNWRITTEN;
-       else
+               iomap->length = zi->i_max_size - iomap->offset;
+       } else {
                iomap->type = IOMAP_MAPPED;
-       if (flags & IOMAP_WRITE)
-               length = zi->i_max_size - offset;
-       else
-               length = min(length, isize - offset);
+               iomap->length = isize - iomap->offset;
+       }
        mutex_unlock(&zi->i_truncate_mutex);
 
-       iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
-       iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
-       iomap->bdev = inode->i_sb->s_bdev;
-       iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
-
        trace_zonefs_iomap_begin(inode, iomap);
 
        return 0;
 }
 
-static const struct iomap_ops zonefs_iomap_ops = {
-       .iomap_begin    = zonefs_iomap_begin,
+static const struct iomap_ops zonefs_write_iomap_ops = {
+       .iomap_begin    = zonefs_write_iomap_begin,
 };
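
The write-path mapping can be sketched the same way. The user-space model below (again with hypothetical values) reports a mapped extent up to EOF for offsets below the inode size, and an unwritten extent that extends all the way to the file maximum size for offsets at or beyond it, so sequential appends always see a mapping that reaches the end of the zone.

/* User-space model of the write-path mapping; all values are examples. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

enum map_type { MAP_MAPPED, MAP_UNWRITTEN };

struct map {
	enum map_type type;
	uint64_t offset;   /* block-aligned file offset */
	uint64_t addr;     /* byte address on the zoned device */
	uint64_t length;
};

/* Mirror of the write-path decision: mapped below EOF, unwritten beyond. */
static struct map write_map(uint64_t offset, uint64_t blocksize,
			    uint64_t isize, uint64_t max_size,
			    uint64_t zsector)
{
	struct map m;

	m.offset = ALIGN_DOWN(offset, blocksize);
	m.addr = (zsector << SECTOR_SHIFT) + m.offset;
	if (m.offset >= isize) {
		m.type = MAP_UNWRITTEN;
		m.length = max_size - m.offset;   /* up to the zone size */
	} else {
		m.type = MAP_MAPPED;
		m.length = isize - m.offset;      /* up to EOF */
	}
	return m;
}

int main(void)
{
	/* Append at EOF of a 1 MiB file in a 256 MiB zone. */
	struct map m = write_map(1048576, 4096, 1048576,
				 268435456, 524288);

	printf("%s offset=%llu addr=%llu length=%llu\n",
	       m.type == MAP_MAPPED ? "mapped" : "unwritten",
	       (unsigned long long)m.offset,
	       (unsigned long long)m.addr,
	       (unsigned long long)m.length);
	return 0;
}
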
 
 static int zonefs_readpage(struct file *unused, struct page *page)
 {
-       return iomap_readpage(page, &zonefs_iomap_ops);
+       return iomap_readpage(page, &zonefs_read_iomap_ops);
 }
 
 static void zonefs_readahead(struct readahead_control *rac)
 {
-       iomap_readahead(rac, &zonefs_iomap_ops);
+       iomap_readahead(rac, &zonefs_read_iomap_ops);
 }
 
 /*
  * Map blocks for page writeback. This is used only on conventional zone files,
  * which implies that the page range can only be within the fixed inode size.
  */
-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
-                            struct inode *inode, loff_t offset)
+static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+                                  struct inode *inode, loff_t offset)
 {
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
 
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;
 
-       return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
-                                 IOMAP_WRITE, &wpc->iomap, NULL);
+       return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
+                                       IOMAP_WRITE, &wpc->iomap, NULL);
 }
 
 static const struct iomap_writeback_ops zonefs_writeback_ops = {
-       .map_blocks             = zonefs_map_blocks,
+       .map_blocks             = zonefs_write_map_blocks,
 };
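
For completeness, the mapping-reuse test used by zonefs_write_map_blocks() above can be modelled as below; the struct is only an illustrative stand-in for the iomap cached in the writepage context (wpc->iomap), and a cache hit lets map_blocks return without another iomap_begin call.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the iomap cached in the writepage context. */
struct cached_iomap {
	uint64_t offset;
	uint64_t length;
};

/* True if the previously established mapping already covers @offset. */
static bool mapping_covers(const struct cached_iomap *cache, uint64_t offset)
{
	return offset >= cache->offset &&
	       offset < cache->offset + cache->length;
}

int main(void)
{
	struct cached_iomap cache = { .offset = 0, .length = 1048576 };

	printf("%d %d\n", mapping_covers(&cache, 4096),
	       mapping_covers(&cache, 2097152));
	return 0;
}
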
 
 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
                return -EINVAL;
        }
 
-       return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+       return iomap_swapfile_activate(sis, swap_file, span,
+                                      &zonefs_read_iomap_ops);
 }
 
 static const struct address_space_operations zonefs_file_aops = {
 
        /* Serialize against truncates */
        filemap_invalidate_lock_shared(inode->i_mapping);
-       ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
+       ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
        filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
        if (append)
                ret = zonefs_file_dio_append(iocb, from);
        else
-               ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+               ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
                                   &zonefs_write_dio_ops, 0, 0);
        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
            (ret > 0 || ret == -EIOCBQUEUED)) {
        if (ret <= 0)
                goto inode_unlock;
 
-       ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
+       ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
        if (ret > 0)
                iocb->ki_pos += ret;
        else if (ret == -EIO)
                        goto inode_unlock;
                }
                file_accessed(iocb->ki_filp);
-               ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
+               ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
                                   &zonefs_read_dio_ops, 0, 0);
        } else {
                ret = generic_file_read_iter(iocb, to);