/*
         * Now that we've unmapped all full blocks we'll have to zero out any
-        * partial block at the beginning and/or end.  iomap_zero_range is smart
+        * partial block at the beginning and/or end.  xfs_zero_range is smart
         * enough to skip any holes, including those we just created, but we
         * must take care not to zero beyond EOF and enlarge i_size.
         */
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
-       error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
-                       &xfs_buffered_write_iomap_ops);
+       error = xfs_zero_range(ip, offset, len, NULL);
        if (error)
                return error;
 
        /*
         * If we zeroed right up to EOF and EOF straddles a page boundary we
         * must make sure that the post-EOF area is also zeroed because the
-        * page could be mmap'd and iomap_zero_range doesn't do that for us.
+        * page could be mmap'd and xfs_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
        if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
 
                }
 
                trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
-               error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
-                               NULL, &xfs_buffered_write_iomap_ops);
+               error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
                if (error)
                        return error;
        } else
 
 const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
 };
+
+int
+xfs_zero_range(
+       struct xfs_inode        *ip,
+       loff_t                  pos,
+       loff_t                  len,
+       bool                    *did_zero)
+{
+       struct inode            *inode = VFS_I(ip);
+
+       return iomap_zero_range(inode, pos, len, did_zero,
+                               &xfs_buffered_write_iomap_ops);
+}
+
+int
+xfs_truncate_page(
+       struct xfs_inode        *ip,
+       loff_t                  pos,
+       bool                    *did_zero)
+{
+       struct inode            *inode = VFS_I(ip);
+
+       return iomap_truncate_page(inode, pos, did_zero,
+                                  &xfs_buffered_write_iomap_ops);
+}
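
(For orientation only, not part of the patch: a minimal sketch of how a caller is expected to use the new helpers. The function name and the oldsize/newsize parameters here are invented for illustration; the real call sites are the ones converted in the hunks above and below.)

/* Illustrative sketch: zero the bytes between the old and new EOF when a
 * file grows, mirroring the xfs_setattr_size conversion further down.
 * Assumes the caller already holds the appropriate inode locks. */
static int example_grow_zero(struct xfs_inode *ip, loff_t oldsize, loff_t newsize)
{
        bool    did_zero = false;
        int     error;

        /* xfs_zero_range() goes through iomap and skips any holes. */
        error = xfs_zero_range(ip, oldsize, newsize - oldsize, &did_zero);
        if (error)
                return error;

        /* did_zero reports back whether any bytes actually needed zeroing. */
        return 0;
}

One apparent benefit of the wrappers is that the iomap_ops argument is now supplied in a single place, so later changes to the zeroing path should only need to touch xfs_zero_range() and xfs_truncate_page() rather than every call site.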
 
 int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
                struct xfs_bmbt_irec *, u16);
 
+int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
+               bool *did_zero);
+int xfs_truncate_page(struct xfs_inode *ip, loff_t pos, bool *did_zero);
+
 static inline xfs_filblks_t
 xfs_aligned_fsb_count(
        xfs_fileoff_t           offset_fsb,
 
         */
        if (newsize > oldsize) {
                trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
-               error = iomap_zero_range(inode, oldsize, newsize - oldsize,
-                               &did_zeroing, &xfs_buffered_write_iomap_ops);
+               error = xfs_zero_range(ip, oldsize, newsize - oldsize,
+                               &did_zeroing);
        } else {
                /*
                 * iomap won't detect a dirty page over an unwritten block (or a
                                                     newsize);
                if (error)
                        return error;
-               error = iomap_truncate_page(inode, newsize, &did_zeroing,
-                               &xfs_buffered_write_iomap_ops);
+               error = xfs_truncate_page(ip, newsize, &did_zeroing);
        }
 
        if (error)
 
                return 0;
 
        trace_xfs_zero_eof(ip, isize, pos - isize);
-       return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
-                       &xfs_buffered_write_iomap_ops);
+       return xfs_zero_range(ip, isize, pos - isize, NULL);
 }
 
 /*