if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
        memcpy(nd_cmd.cmd.passphrase, key_data->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
                return -EIO;
        }
 
-       /* DIMM unlocked, invalidate all CPU caches before we read it */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
-
        return 0;
 }
 
        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
-       /* flush all cache before we erase DIMM */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        memcpy(nd_cmd.cmd.passphrase, key->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
                return -ENXIO;
        }
 
-       /* DIMM erased, invalidate all CPU caches before we read it */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        return 0;
 }
 
        if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
                return -ENXIO;
        }
 
-       /* flush all cache before we make the nvdimms available */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        return 0;
 }
 
        if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
-       /* flush all cache before we erase DIMM */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        memcpy(nd_cmd.cmd.passphrase, nkey->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
 };
 
 const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
-
-MODULE_IMPORT_NS(DEVMEM);
 
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  */
+#include <linux/memregion.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/device.h>
         */
        sysfs_put(nd_region->bb_state);
        nd_region->bb_state = NULL;
+
+       /*
+        * Try to flush caches here since a disabled region may be subject to
+        * secure erase while disabled, and previous dirty data should not be
+        * written back to a new instance of the region. This only matters on
+        * bare metal where security commands are available, so silent failure
+        * here is ok.
+        */
+       if (cpu_cache_has_invalidate_memregion())
+               cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
 }
 
 static int child_notify(struct device *dev, void *data)
 
        return 0;
 }
 
+/*
+ * nd_region_invalidate_memregion() - flush CPU caches if region contents
+ * may have changed behind dirty cachelines (e.g. after an unlock / secure
+ * erase marked a backing DIMM NDD_INCOHERENT).
+ *
+ * Returns 0 when caches are known coherent (or flushed), -ENXIO when the
+ * platform cannot synchronize CPU cache state and the security-test
+ * bypass is not enabled.
+ */
+static int nd_region_invalidate_memregion(struct nd_region *nd_region)
+{
+       int i, incoherent = 0;
+
+       /* One incoherent DIMM is enough to require a flush of the region */
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+               if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
+                       incoherent++;
+                       break;
+               }
+       }
+
+       if (!incoherent)
+               return 0;
+
+       if (!cpu_cache_has_invalidate_memregion()) {
+               if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
+                       dev_warn(
+                               &nd_region->dev,
+                               "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+                       goto out;
+               } else {
+                       dev_err(&nd_region->dev,
+                               "Failed to synchronize CPU cache state\n");
+                       return -ENXIO;
+               }
+       }
+
+       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+out:
+       /* Caches now coherent with media: clear the incoherent markers */
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+               clear_bit(NDD_INCOHERENT, &nvdimm->flags);
+       }
+
+       return 0;
+}
+
 int nd_region_activate(struct nd_region *nd_region)
 {
-       int i, j, num_flush = 0;
+       int i, j, rc, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);
 
+       rc = nd_region_invalidate_memregion(nd_region);
+       if (rc)
+               return rc;
+
        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
 
        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
 }
+
+MODULE_IMPORT_NS(DEVMEM);
 
        rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
+       if (rc == 0)
+               set_bit(NDD_INCOHERENT, &nvdimm->flags);
 
        nvdimm_put_key(key);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
                return -ENOKEY;
 
        rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
+       if (rc == 0)
+               set_bit(NDD_INCOHERENT, &nvdimm->flags);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");
                return -ENOKEY;
 
        rc = nvdimm->sec.ops->overwrite(nvdimm, data);
+       if (rc == 0)
+               set_bit(NDD_INCOHERENT, &nvdimm->flags);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
 
        NDD_WORK_PENDING = 4,
        /* dimm supports namespace labels */
        NDD_LABELING = 6,
+       /*
+        * dimm contents have changed requiring invalidation of CPU caches prior
+        * to activation of a region that includes this device
+        */
+       NDD_INCOHERENT = 7,
 
        /* need to set a limit somewhere, but yes, this is likely overkill */
        ND_IOCTL_MAX_BUFLEN = SZ_4M,