fsdax,xfs: set the shared flag when file extent is shared
author Shiyang Ruan <ruansy.fnst@fujitsu.com>
Thu, 1 Dec 2022 15:28:54 +0000 (15:28 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 12 Dec 2022 02:12:16 +0000 (18:12 -0800)
If a dax page is shared, reads through mmap at different offsets can also
trigger page faults on the same dax page, so the condition is about
sharing, not only about copy-on-write.  Rename the flag from "cow" to
"shared", and get the shared flag from the filesystem on the read path as
well.
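
For illustration only (not part of this patch): a minimal userspace sketch
of the scenario, assuming an fsdax mount at /mnt/dax and a pre-existing
file "a"; paths and sizes are made up.

	#include <fcntl.h>
	#include <linux/fs.h>		/* FICLONE */
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	int main(void)
	{
		int a = open("/mnt/dax/a", O_RDWR);
		int b = open("/mnt/dax/b", O_RDWR | O_CREAT, 0644);

		ioctl(b, FICLONE, a);	/* b now shares a's extents */

		/* a plain read fault on the clone hits the same dax
		 * page that backs the origin file */
		char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, b, 0);
		return p[0] == 0;
	}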

Link: https://lkml.kernel.org/r/1669908538-55-5-git-send-email-ruansy.fnst@fujitsu.com
Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/dax.c
fs/xfs/xfs_iomap.c

index 359b958eb835d2265700592a75f9bf77cb5a55f3..fa547ce41add451bf0d551e903c318e5a3f735bb 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -846,12 +846,6 @@ static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
                (iter->iomap.flags & IOMAP_F_DIRTY);
 }
 
-static bool dax_fault_is_cow(const struct iomap_iter *iter)
-{
-       return (iter->flags & IOMAP_WRITE) &&
-               (iter->iomap.flags & IOMAP_F_SHARED);
-}
-
 /*
  * By this point grab_mapping_entry() has ensured that we have a locked entry
  * of the appropriate size so we don't have to worry about downgrading PMDs to
@@ -865,13 +859,14 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        void *new_entry = dax_make_entry(pfn, flags);
-       bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
-       bool cow = dax_fault_is_cow(iter);
+       bool write = iter->flags & IOMAP_WRITE;
+       bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
+       bool shared = iter->iomap.flags & IOMAP_F_SHARED;
 
        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-       if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
+       if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
                unsigned long index = xas->xa_index;
                /* we are replacing a zero page with block mapping */
                if (dax_is_pmd_entry(entry))
@@ -883,12 +878,12 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 
        xas_reset(xas);
        xas_lock_irq(xas);
-       if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+       if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                void *old;
 
                dax_disassociate_entry(entry, mapping, false);
                dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
-                               cow);
+                               shared);
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
@@ -908,7 +903,7 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 
-       if (cow)
+       if (write && shared)
                xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
 
        xas_unlock_irq(xas);
index 07da03976ec125c3d2cca979d2ff736e43252306..881de99766cad1b3c52e74103c58db1a53077f34 100644 (file)
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1215,7 +1215,7 @@ xfs_read_iomap_begin(
                return error;
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
-       if (!error && (flags & IOMAP_REPORT))
+       if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
                error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
        xfs_iunlock(ip, lockmode);
 
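
For reference (a simplified sketch, not verbatim kernel source): with the
xfs_iomap.c hunk, read faults on DAX inodes also trim around shared
extents, so IOMAP_F_SHARED now reaches dax_insert_entry() on reads too.

	/*
	 * Flag path after this patch (sketch):
	 *
	 *   dax fault (read or write)
	 *     -> xfs_read_iomap_begin()
	 *          xfs_reflink_trim_around_shared(ip, &imap, &shared)
	 *          the returned iomap carries IOMAP_F_SHARED when shared
	 *     -> dax_insert_entry()
	 *          shared = iter->iomap.flags & IOMAP_F_SHARED
	 *          the entry is associated as shared on reads as well;
	 *          PAGECACHE_TAG_TOWRITE is set only for write && shared
	 */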