arm-smmu-v3/sva: Add SVA domain support
authorLu Baolu <baolu.lu@linux.intel.com>
Mon, 31 Oct 2022 00:59:12 +0000 (08:59 +0800)
committerJoerg Roedel <jroedel@suse.de>
Thu, 3 Nov 2022 14:47:49 +0000 (15:47 +0100)
Add support for SVA domain allocation and provide an SVA-specific
iommu_domain_ops. This implementation is based on the existing SVA
code. Possible cleanup and refactoring are left for later incremental
changes.
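
For context, a minimal consumer-side sketch of the flow that this
enables. The helper below is hypothetical and assumes the core SVA API
as reworked earlier in this series; exact signatures may vary between
kernel versions.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int example_sva_roundtrip(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	/*
	 * The core allocates an IOMMU_DOMAIN_SVA domain through
	 * domain_alloc() (arm_smmu_sva_domain_alloc() on SMMUv3) and
	 * attaches it with the new set_dev_pasid() domain op.
	 */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* PASID the device would use for DMA into current->mm. */
	pasid = iommu_sva_get_pasid(handle);
	dev_info(dev, "bound to PASID %u\n", pasid);

	/*
	 * Detach: the core ends up in the new remove_dev_pasid() op,
	 * i.e. arm_smmu_remove_dev_pasid() ->
	 * arm_smmu_sva_remove_dev_pasid().
	 */
	iommu_sva_unbind_device(handle);
	return 0;
}

The unmanaged/DMA domain paths are unchanged; arm_smmu_domain_alloc()
simply dispatches IOMMU_DOMAIN_SVA requests to the new allocator.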

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Link: https://lore.kernel.org/r/20221031005917.45690-9-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 8fcf0df4bd0eabcc0734c43bac61113fe4f12c5c..2d188d12419ec6d5158921070ff4902e02e78868 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -549,3 +549,64 @@ void arm_smmu_sva_notifier_synchronize(void)
         */
        mmu_notifier_synchronize();
 }
+
+void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+                                  struct device *dev, ioasid_t id)
+{
+       struct mm_struct *mm = domain->mm;
+       struct arm_smmu_bond *bond = NULL, *t;
+       struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+       mutex_lock(&sva_lock);
+       list_for_each_entry(t, &master->bonds, list) {
+               if (t->mm == mm) {
+                       bond = t;
+                       break;
+               }
+       }
+
+       if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+               list_del(&bond->list);
+               arm_smmu_mmu_notifier_put(bond->smmu_mn);
+               kfree(bond);
+       }
+       mutex_unlock(&sva_lock);
+}
+
+static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
+                                     struct device *dev, ioasid_t id)
+{
+       int ret = 0;
+       struct iommu_sva *handle;
+       struct mm_struct *mm = domain->mm;
+
+       mutex_lock(&sva_lock);
+       handle = __arm_smmu_sva_bind(dev, mm);
+       if (IS_ERR(handle))
+               ret = PTR_ERR(handle);
+       mutex_unlock(&sva_lock);
+
+       return ret;
+}
+
+static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
+{
+       kfree(domain);
+}
+
+static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
+       .set_dev_pasid          = arm_smmu_sva_set_dev_pasid,
+       .free                   = arm_smmu_sva_domain_free
+};
+
+struct iommu_domain *arm_smmu_sva_domain_alloc(void)
+{
+       struct iommu_domain *domain;
+
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return NULL;
+       domain->ops = &arm_smmu_sva_domain_ops;
+
+       return domain;
+}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 21cb13da122c85c5f9b9da18de19b99bf5ca408f..eed2eb8effa3fcf7db206cb2227d5f21abf1ddb4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2009,6 +2009,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
        struct arm_smmu_domain *smmu_domain;
 
+       if (type == IOMMU_DOMAIN_SVA)
+               return arm_smmu_sva_domain_alloc();
+
        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_DMA_FQ &&
@@ -2838,6 +2841,17 @@ static int arm_smmu_def_domain_type(struct device *dev)
        return 0;
 }
 
+static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+       struct iommu_domain *domain;
+
+       domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
+       if (WARN_ON(IS_ERR(domain)) || !domain)
+               return;
+
+       arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
+}
+
 static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
@@ -2846,6 +2860,7 @@ static struct iommu_ops arm_smmu_ops = {
        .device_group           = arm_smmu_device_group,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
+       .remove_dev_pasid       = arm_smmu_remove_dev_pasid,
        .dev_enable_feat        = arm_smmu_dev_enable_feature,
        .dev_disable_feat       = arm_smmu_dev_disable_feature,
        .sva_bind               = arm_smmu_sva_bind,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index d2ba86470c4239439648b84b3c9de5af067c7d0f..5aa853e98d388fa00e4bc60a3b866042e289a185 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -758,6 +758,9 @@ struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm);
 void arm_smmu_sva_unbind(struct iommu_sva *handle);
 u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle);
 void arm_smmu_sva_notifier_synchronize(void);
+struct iommu_domain *arm_smmu_sva_domain_alloc(void);
+void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+                                  struct device *dev, ioasid_t id);
 #else /* CONFIG_ARM_SMMU_V3_SVA */
 static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 {
@@ -803,5 +806,16 @@ static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
 }
 
 static inline void arm_smmu_sva_notifier_synchronize(void) {}
+
+static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void)
+{
+       return NULL;
+}
+
+static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+                                                struct device *dev,
+                                                ioasid_t id)
+{
+}
 #endif /* CONFIG_ARM_SMMU_V3_SVA */
 #endif /* _ARM_SMMU_V3_H */