goto err_alloc_dax;
        }
        set_dax_synchronous(dax_dev);
+       set_dax_nocache(dax_dev);
+       set_dax_nomc(dax_dev);
 
        /* a device_dax instance is dead while the driver is not attached */
        kill_dax(dax_dev);
 
        DAXDEV_WRITE_CACHE,
        /* flag to check if device supports synchronous flush */
        DAXDEV_SYNC,
+       /* do not leave the caches dirty after writes */
+       DAXDEV_NOCACHE,
+       /* handle CPU fetch exceptions during reads */
+       DAXDEV_NOMC,
 };
 
 /**
        if (!dax_alive(dax_dev))
                return 0;
 
-       return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+       /*
+        * The userspace address for the memory copy has already been validated
+        * via access_ok() in vfs_write, so use the 'no check' version to bypass
+        * the HARDENED_USERCOPY overhead.
+        */
+       if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
+               return _copy_from_iter_flushcache(addr, bytes, i);
+       return _copy_from_iter(addr, bytes, i);
 }
-EXPORT_SYMBOL_GPL(dax_copy_from_iter);
 
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
        if (!dax_alive(dax_dev))
                return 0;
 
-       return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+       /*
+        * The userspace address for the memory copy has already been validated
+        * via access_ok() in vfs_read, so use the 'no check' version to bypass
+        * the HARDENED_USERCOPY overhead.
+        */
+       if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
+               return _copy_mc_to_iter(addr, bytes, i);
+       return _copy_to_iter(addr, bytes, i);
 }
-EXPORT_SYMBOL_GPL(dax_copy_to_iter);
 
 int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
                        size_t nr_pages)
 }
 EXPORT_SYMBOL_GPL(set_dax_synchronous);
 
+void set_dax_nocache(struct dax_device *dax_dev)
+{
+       set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nocache);
+
+void set_dax_nomc(struct dax_device *dax_dev)
+{
+       set_bit(DAXDEV_NOMC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nomc);
+
 bool dax_alive(struct dax_device *dax_dev)
 {
        lockdep_assert_held(&dax_srcu);
 
        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
-static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
-
-       return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
-
-       return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
 static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
                                      size_t nr_pages)
 {
 
 #else
 #define linear_dax_direct_access NULL
-#define linear_dax_copy_from_iter NULL
-#define linear_dax_copy_to_iter NULL
 #define linear_dax_zero_page_range NULL
 #endif
 
        .prepare_ioctl = linear_prepare_ioctl,
        .iterate_devices = linear_iterate_devices,
        .direct_access = linear_dax_direct_access,
-       .dax_copy_from_iter = linear_dax_copy_from_iter,
-       .dax_copy_to_iter = linear_dax_copy_to_iter,
        .dax_zero_page_range = linear_dax_zero_page_range,
 };
 
 
 }
 
 #if IS_ENABLED(CONFIG_FS_DAX)
-static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
-                  struct iov_iter *i)
-{
-       struct pending_block *block;
-
-       if (!bytes)
-               return 0;
-
-       block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
-       if (!block) {
-               DMERR("Error allocating dax pending block");
-               return -ENOMEM;
-       }
-
-       block->data = kzalloc(bytes, GFP_KERNEL);
-       if (!block->data) {
-               DMERR("Error allocating dax data space");
-               kfree(block);
-               return -ENOMEM;
-       }
-
-       /* write data provided via the iterator */
-       if (!copy_from_iter(block->data, bytes, i)) {
-               DMERR("Error copying dax data");
-               kfree(block->data);
-               kfree(block);
-               return -EIO;
-       }
-
-       /* rewind the iterator so that the block driver can use it */
-       iov_iter_revert(i, bytes);
-
-       block->datalen = bytes;
-       block->sector = bio_to_dev_sectors(lc, sector);
-       block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
-
-       atomic_inc(&lc->pending_blocks);
-       spin_lock_irq(&lc->blocks_lock);
-       list_add_tail(&block->list, &lc->unflushed_blocks);
-       spin_unlock_irq(&lc->blocks_lock);
-       wake_up_process(lc->log_kthread);
-
-       return 0;
-}
-
 static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
                pgoff_t *pgoff)
 {
        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
-static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
-                                           pgoff_t pgoff, void *addr, size_t bytes,
-                                           struct iov_iter *i)
-{
-       struct log_writes_c *lc = ti->private;
-       sector_t sector = pgoff * PAGE_SECTORS;
-       struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
-       int err;
-
-       /* Don't bother doing anything if logging has been disabled */
-       if (!lc->logging_enabled)
-               goto dax_copy;
-
-       err = log_dax(lc, sector, bytes, i);
-       if (err) {
-               DMWARN("Error %d logging DAX write", err);
-               return 0;
-       }
-dax_copy:
-       return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
-                                         pgoff_t pgoff, void *addr, size_t bytes,
-                                         struct iov_iter *i)
-{
-       struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
-
-       return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
 static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
                                          size_t nr_pages)
 {
 
 #else
 #define log_writes_dax_direct_access NULL
-#define log_writes_dax_copy_from_iter NULL
-#define log_writes_dax_copy_to_iter NULL
 #define log_writes_dax_zero_page_range NULL
 #endif
 
        .iterate_devices = log_writes_iterate_devices,
        .io_hints = log_writes_io_hints,
        .direct_access = log_writes_dax_direct_access,
-       .dax_copy_from_iter = log_writes_dax_copy_from_iter,
-       .dax_copy_to_iter = log_writes_dax_copy_to_iter,
        .dax_zero_page_range = log_writes_dax_zero_page_range,
 };
 
 
        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
-static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
-
-       return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
-
-       return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
 static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
                                      size_t nr_pages)
 {
 
 #else
 #define stripe_dax_direct_access NULL
-#define stripe_dax_copy_from_iter NULL
-#define stripe_dax_copy_to_iter NULL
 #define stripe_dax_zero_page_range NULL
 #endif
 
        .iterate_devices = stripe_iterate_devices,
        .io_hints = stripe_io_hints,
        .direct_access = stripe_dax_direct_access,
-       .dax_copy_from_iter = stripe_dax_copy_from_iter,
-       .dax_copy_to_iter = stripe_dax_copy_to_iter,
        .dax_zero_page_range = stripe_dax_zero_page_range,
 };
 
 
        return ret;
 }
 
-static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-                                   void *addr, size_t bytes, struct iov_iter *i)
-{
-       struct mapped_device *md = dax_get_private(dax_dev);
-       sector_t sector = pgoff * PAGE_SECTORS;
-       struct dm_target *ti;
-       long ret = 0;
-       int srcu_idx;
-
-       ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
-       if (!ti)
-               goto out;
-       if (!ti->type->dax_copy_from_iter) {
-               ret = copy_from_iter(addr, bytes, i);
-               goto out;
-       }
-       ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
- out:
-       dm_put_live_table(md, srcu_idx);
-
-       return ret;
-}
-
-static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       struct mapped_device *md = dax_get_private(dax_dev);
-       sector_t sector = pgoff * PAGE_SECTORS;
-       struct dm_target *ti;
-       long ret = 0;
-       int srcu_idx;
-
-       ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
-       if (!ti)
-               goto out;
-       if (!ti->type->dax_copy_to_iter) {
-               ret = copy_to_iter(addr, bytes, i);
-               goto out;
-       }
-       ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
- out:
-       dm_put_live_table(md, srcu_idx);
-
-       return ret;
-}
-
 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
                                  size_t nr_pages)
 {
                        md->dax_dev = NULL;
                        goto bad;
                }
+               set_dax_nocache(md->dax_dev);
+               set_dax_nomc(md->dax_dev);
                if (dax_add_host(md->dax_dev, md->disk))
                        goto bad;
        }
 
 static const struct dax_operations dm_dax_ops = {
        .direct_access = dm_dax_direct_access,
-       .copy_from_iter = dm_dax_copy_from_iter,
-       .copy_to_iter = dm_dax_copy_to_iter,
        .zero_page_range = dm_dax_zero_page_range,
 };
 
 
        return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
 }
 
-/*
- * Bounds checking, both file offset and device offset, is handled by
- * dax_iomap_actor()
- */
-static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
-{
-       return _copy_mc_to_iter(addr, bytes, i);
-}
-
 static const struct dax_operations pmem_dax_ops = {
        .direct_access = pmem_dax_direct_access,
-       .copy_from_iter = pmem_copy_from_iter,
-       .copy_to_iter = pmem_copy_to_iter,
        .zero_page_range = pmem_dax_zero_page_range,
 };
 
                rc = PTR_ERR(dax_dev);
                goto out;
        }
+       set_dax_nocache(dax_dev);
+       set_dax_nomc(dax_dev);
        if (is_nvdimm_sync(nd_region))
                set_dax_synchronous(dax_dev);
        rc = dax_add_host(dax_dev, disk);
 
        .release        = dcssblk_release,
 };
 
-static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
-               pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
-{
-       return copy_from_iter(addr, bytes, i);
-}
-
-static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
-               pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
-{
-       return copy_to_iter(addr, bytes, i);
-}
-
 static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
                                       pgoff_t pgoff, size_t nr_pages)
 {
 
 static const struct dax_operations dcssblk_dax_ops = {
        .direct_access = dcssblk_dax_direct_access,
-       .copy_from_iter = dcssblk_dax_copy_from_iter,
-       .copy_to_iter = dcssblk_dax_copy_to_iter,
        .zero_page_range = dcssblk_dax_zero_page_range,
 };
 
 
                if (map_len > end - pos)
                        map_len = end - pos;
 
-               /*
-                * The userspace address for the memory copy has already been
-                * validated via access_ok() in either vfs_read() or
-                * vfs_write(), depending on which operation we are doing.
-                */
                if (iov_iter_rw(iter) == WRITE)
                        xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
                                        map_len, iter);
 
        return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
 }
 
-static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
-                                      pgoff_t pgoff, void *addr,
-                                      size_t bytes, struct iov_iter *i)
-{
-       return copy_from_iter(addr, bytes, i);
-}
-
-static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
-                                      pgoff_t pgoff, void *addr,
-                                      size_t bytes, struct iov_iter *i)
-{
-       return copy_to_iter(addr, bytes, i);
-}
-
 static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
                                     pgoff_t pgoff, size_t nr_pages)
 {
 
 static const struct dax_operations virtio_fs_dax_ops = {
        .direct_access = virtio_fs_direct_access,
-       .copy_from_iter = virtio_fs_copy_from_iter,
-       .copy_to_iter = virtio_fs_copy_to_iter,
        .zero_page_range = virtio_fs_zero_page_range,
 };
 
 
         */
        bool (*dax_supported)(struct dax_device *, struct block_device *, int,
                        sector_t, sector_t);
-       /* copy_from_iter: required operation for fs-dax direct-i/o */
-       size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
-                       struct iov_iter *);
-       /* copy_to_iter: required operation for fs-dax direct-i/o */
-       size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
-                       struct iov_iter *);
        /* zero_page_range: required operation. Zero page range   */
        int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
 };
 }
 #endif
 
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+
 struct writeback_control;
 #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
 int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
 
  */
 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);
 
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;
        dm_dax_direct_access_fn direct_access;
-       dm_dax_copy_iter_fn dax_copy_from_iter;
-       dm_dax_copy_iter_fn dax_copy_to_iter;
        dm_dax_zero_page_range_fn dax_zero_page_range;
 
        /* For internal device-mapper use. */