return NULL;
 
        domain->type = type;
+       domain->owner = ops;
        /*
         * If not already set, assume all sizes by default; the driver
         * may override this later
 static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
 {
+       struct device *dev;
+
        if (group->domain && group->domain != group->default_domain &&
            group->domain != group->blocking_domain)
                return -EBUSY;
 
+       dev = iommu_group_first_dev(group);
+       if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+               return -EINVAL;
+
        return __iommu_group_set_domain(group, domain);
 }
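
The rule the new test enforces is simple: a domain may only be attached to a device that is managed by the same driver whose ops allocated that domain. As a hedged, stand-alone illustration (plain userspace C, not kernel API; fake_iommu_ops, fake_iommu_domain, fake_device and fake_attach are all made-up names), the accept/reject behaviour looks like this:

	#include <errno.h>
	#include <stdio.h>

	struct fake_iommu_ops { const char *driver; };

	struct fake_iommu_domain {
		const struct fake_iommu_ops *owner;	/* whose domain_alloc we came from */
	};

	struct fake_device {
		const struct fake_iommu_ops *iommu_ops;	/* driver managing the device */
	};

	/* Mirrors the test added to __iommu_attach_group() above */
	static int fake_attach(struct fake_iommu_domain *domain, struct fake_device *dev)
	{
		if (!dev->iommu_ops || dev->iommu_ops != domain->owner)
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		struct fake_iommu_ops drv_a = { "driver-a" }, drv_b = { "driver-b" };
		struct fake_iommu_domain dom = { .owner = &drv_a };
		struct fake_device dev_a = { &drv_a }, dev_b = { &drv_b };

		printf("same driver:  %d\n", fake_attach(&dom, &dev_a));	/* 0 */
		printf("cross driver: %d\n", fake_attach(&dom, &dev_b));	/* -EINVAL */
		return 0;
	}

The same comparison is repeated in the PASID attach path below, so both per-device and per-PASID attach refuse a cross-driver domain.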
 
        if (!group)
                return -ENODEV;
 
+       if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+               return -EINVAL;
+
        mutex_lock(&group->mutex);
        curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
        if (curr) {
 
                        hwpt->domain = NULL;
                        goto out_abort;
                }
+               hwpt->domain->owner = ops;
        } else {
                hwpt->domain = iommu_domain_alloc(idev->dev->bus);
                if (!hwpt->domain) {
                hwpt->domain = NULL;
                goto out_abort;
        }
+       hwpt->domain->owner = ops;
 
        if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
                rc = -EINVAL;
 
        unsigned type;
        const struct iommu_domain_ops *ops;
        const struct iommu_dirty_ops *dirty_ops;
-
+       const struct iommu_ops *owner; /* Whose domain_alloc we came from */
        unsigned long pgsize_bitmap;    /* Bitmap of page sizes in use */
        struct iommu_domain_geometry geometry;
        struct iommu_dma_cookie *iova_cookie;
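
One consequence is visible in the iommufd hunks above: ops->domain_alloc_user() hands back a domain without going through __iommu_domain_alloc(), so nothing fills the new owner field unless the caller records it itself, which is what the two `hwpt->domain->owner = ops;` assignments are for. A hedged sketch of that calling pattern, again with made-up names (fake_domain_alloc_user is not a kernel function):

	#include <assert.h>
	#include <stdlib.h>

	struct fake_iommu_ops { int id; };

	struct fake_iommu_domain {
		const struct fake_iommu_ops *owner;	/* NULL until recorded */
	};

	/* Stand-in for ops->domain_alloc_user(): knows nothing about ->owner */
	static struct fake_iommu_domain *fake_domain_alloc_user(void)
	{
		return calloc(1, sizeof(struct fake_iommu_domain));
	}

	int main(void)
	{
		const struct fake_iommu_ops ops = { 1 };
		struct fake_iommu_domain *domain = fake_domain_alloc_user();

		assert(domain && !domain->owner);	/* allocator left it unset */
		domain->owner = &ops;			/* caller records it, as the hunks above do */
		assert(domain->owner == &ops);		/* attach-time checks can now pass */
		free(domain);
		return 0;
	}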