fsdax,xfs: port unshare to fsdax
authorShiyang Ruan <ruansy.fnst@fujitsu.com>
Thu, 1 Dec 2022 15:32:33 +0000 (15:32 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 12 Dec 2022 02:12:17 +0000 (18:12 -0800)
Implement unshare in fsdax mode: when a block is shared (IOMAP_F_SHARED), copy its data from the source mapping (srcmap) to the newly provided destination mapping (iomap) so the file range no longer shares blocks.

Link: https://lkml.kernel.org/r/1669908753-169-1-git-send-email-ruansy.fnst@fujitsu.com
Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/dax.c
fs/xfs/xfs_reflink.c
include/linux/dax.h

index 8fb928cd9dce5ba1ebec1683b50f704bc015d45d..c48a3a93ab2974785d3346bf98c59c4110be3122 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1245,6 +1245,58 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
+static s64 dax_unshare_iter(struct iomap_iter *iter)
+{
+       struct iomap *iomap = &iter->iomap;
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       loff_t pos = iter->pos;
+       loff_t length = iomap_length(iter);
+       int id = 0;
+       s64 ret = 0;
+       void *daddr = NULL, *saddr = NULL;
+
+       /* don't bother with blocks that are not shared to start with */
+       if (!(iomap->flags & IOMAP_F_SHARED))
+               return length;
+       /* don't bother with holes or unwritten extents */
+       if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+               return length;
+
+       id = dax_read_lock();
+       ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
+       if (ret < 0)
+               goto out_unlock;
+
+       ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
+       if (ret < 0)
+               goto out_unlock;
+
+       /* copy_mc_to_kernel() returns the number of bytes *not* copied. */
+       /* On success report the full length so iomap_iter() makes progress. */
+       ret = copy_mc_to_kernel(daddr, saddr, length) ? -EIO : length;
+
+out_unlock:
+       dax_read_unlock(id);
+       return ret;
+}
+
+/*
+ * dax_file_unshare - break block sharing (CoW) for a byte range of a DAX file
+ * @inode: file whose range is being unshared
+ * @pos:   byte offset of the start of the range
+ * @len:   length of the range in bytes
+ * @ops:   filesystem iomap operations used to map the range
+ *
+ * Walks [pos, pos + len) with iomap_iter(); each mapping is handed to
+ * dax_unshare_iter(), which copies data from the source mapping to the
+ * destination mapping for blocks flagged IOMAP_F_SHARED.
+ *
+ * Returns 0 on success or a negative errno from iomap_iter().
+ */
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+               const struct iomap_ops *ops)
+{
+       struct iomap_iter iter = {
+               .inode          = inode,
+               .pos            = pos,
+               .len            = len,
+               /* unshare is performed as a DAX write over the range */
+               .flags          = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
+       };
+       int ret;
+
+       /* dax_unshare_iter() reports per-mapping progress via iter.processed */
+       while ((ret = iomap_iter(&iter, ops)) > 0)
+               iter.processed = dax_unshare_iter(&iter);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dax_file_unshare);
+
 static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
 {
        const struct iomap *iomap = &iter->iomap;
index 93bdd25680bc9a4869f5b8bb2372a37dc5898ace..fe46bce8cae6382442244183a29c5ec506fe35c4 100644 (file)
@@ -1693,8 +1693,12 @@ xfs_reflink_unshare(
 
        inode_dio_wait(inode);
 
-       error = iomap_file_unshare(inode, offset, len,
-                       &xfs_buffered_write_iomap_ops);
+       if (IS_DAX(inode))
+               error = dax_file_unshare(inode, offset, len,
+                               &xfs_dax_write_iomap_ops);
+       else
+               error = iomap_file_unshare(inode, offset, len,
+                               &xfs_buffered_write_iomap_ops);
        if (error)
                goto out;
 
index ba985333e26bfa059e687a4bfb991f1bba7fe659..2b5ecb59105912d53245da4f80e47acdeafe669f 100644 (file)
@@ -205,6 +205,8 @@ static inline void dax_unlock_mapping_entry(struct address_space *mapping,
 }
 #endif
 
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+               const struct iomap_ops *ops);
 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops);
 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,