iommu/amd: Add domain_alloc_user based domain allocation
author     Joao Martins <joao.m.martins@oracle.com>
           Tue, 24 Oct 2023 13:51:01 +0000 (14:51 +0100)
committer  Jason Gunthorpe <jgg@nvidia.com>
           Tue, 24 Oct 2023 14:58:43 +0000 (11:58 -0300)
Add the domain_alloc_user op implementation. To that end, refactor
amd_iommu_domain_alloc() to take a dev pointer and flags, and rename it to
do_iommu_domain_alloc() so it becomes a common helper shared with the
domain_alloc_user() implementation. The sole difference on the
domain_alloc_user() path is that the helper also initializes the fields
that iommu_domain_alloc() would otherwise set, so it returns a fully
initialized iommu domain from a single function.

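For context, the new op follows the iommufd calling convention rather than
the legacy .domain_alloc one: the driver itself validates the flags and
reports failure with ERR_PTR() instead of NULL. A minimal sketch of how a
caller such as the iommufd core is expected to pick between the two ops
(illustrative only; example_alloc_user_domain() is a made-up name, not code
from this patch):

	/* Illustrative helper, not part of this patch. */
	static struct iommu_domain *
	example_alloc_user_domain(const struct iommu_ops *ops,
				  struct device *dev, u32 flags)
	{
		struct iommu_domain *domain;

		if (ops->domain_alloc_user)
			/* New path: driver validates flags, failure is ERR_PTR() */
			return ops->domain_alloc_user(dev, flags);

		/* Legacy path: no flags supported, NULL means allocation failure */
		if (flags)
			return ERR_PTR(-EOPNOTSUPP);
		domain = iommu_domain_alloc(dev->bus);
		return domain ?: ERR_PTR(-ENOMEM);
	}
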
This is in preparation for adding dirty-tracking enforcement to the AMD
implementation of domain_alloc_user.

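The flags check in amd_iommu_domain_alloc_user() is where that enforcement
will hook in. A rough sketch of what the follow-up might look like, assuming
the IOMMU_HWPT_ALLOC_DIRTY_TRACKING flag from uapi/linux/iommufd.h and a
hypothetical amd_iommu_hd_support() capability helper (the actual change
lands in a later patch of this series):

	static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
								u32 flags)
	{
		unsigned int type = IOMMU_DOMAIN_UNMANAGED;
		struct amd_iommu *iommu = dev ? rlookup_amd_iommu(dev) : NULL;

		/* Only the dirty-tracking flag is understood; reject anything else */
		if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
			return ERR_PTR(-EOPNOTSUPP);

		/*
		 * Dirty tracking needs hardware dirty-bit support on this IOMMU
		 * (amd_iommu_hd_support() is a placeholder for that check).
		 */
		if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
		    (!iommu || !amd_iommu_hd_support(iommu)))
			return ERR_PTR(-EOPNOTSUPP);

		return do_iommu_domain_alloc(type, dev, flags);
	}
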
Link: https://lore.kernel.org/r/20231024135109.73787-11-joao.m.martins@oracle.com
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 95bd7c25ba6f366b5db2582e8cb5318491cbb523..667e23b0ab0d83540366efd4e4db1c6643408c9c 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -37,6 +37,7 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/dma.h>
+#include <uapi/linux/iommufd.h>
 
 #include "amd_iommu.h"
 #include "../dma-iommu.h"
@@ -2155,28 +2156,64 @@ static inline u64 dma_max_address(void)
        return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
 }
 
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
+                                                 struct device *dev, u32 flags)
 {
        struct protection_domain *domain;
+       struct amd_iommu *iommu = NULL;
+
+       if (dev) {
+               iommu = rlookup_amd_iommu(dev);
+               if (!iommu)
+                       return ERR_PTR(-ENODEV);
+       }
 
        /*
         * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
         * default to use IOMMU_DOMAIN_DMA[_FQ].
         */
        if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        domain = protection_domain_alloc(type);
        if (!domain)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        domain->domain.geometry.aperture_start = 0;
        domain->domain.geometry.aperture_end   = dma_max_address();
        domain->domain.geometry.force_aperture = true;
 
+       if (iommu) {
+               domain->domain.type = type;
+               domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
+               domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+       }
+
        return &domain->domain;
 }
 
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
+{
+       struct iommu_domain *domain;
+
+       domain = do_iommu_domain_alloc(type, NULL, 0);
+       if (IS_ERR(domain))
+               return NULL;
+
+       return domain;
+}
+
+static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
+                                                       u32 flags)
+{
+       unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+
+       if (flags)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       return do_iommu_domain_alloc(type, dev, flags);
+}
+
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
        struct protection_domain *domain;
@@ -2464,6 +2501,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
+       .domain_alloc_user = amd_iommu_domain_alloc_user,
        .probe_device = amd_iommu_probe_device,
        .release_device = amd_iommu_release_device,
        .probe_finalize = amd_iommu_probe_finalize,