 	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
 	int pgtable;
-	int mode = DEFAULT_PGTABLE_LEVEL;
 	int ret;
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return NULL;
+
+	domain->id = domain_id_alloc();
+	if (!domain->id)
+		goto out_err;
+
+	spin_lock_init(&domain->lock);
+	INIT_LIST_HEAD(&domain->dev_list);
+	domain->nid = NUMA_NO_NODE;
+
+	switch (type) {
+	/* No need to allocate io pgtable ops in passthrough mode */
+	case IOMMU_DOMAIN_IDENTITY:
+		return domain;
+	case IOMMU_DOMAIN_DMA:
+		pgtable = amd_iommu_pgtable;
+		break;
 	/*
-	 * Force IOMMU v1 page table when iommu=pt and
-	 * when allocating domain for pass-through devices.
+	 * Force IOMMU v1 page table when allocating
+	 * domain for pass-through devices.
 	 */
-	if (type == IOMMU_DOMAIN_IDENTITY) {
-		pgtable = AMD_IOMMU_V1;
-		mode = PAGE_MODE_NONE;
-	} else if (type == IOMMU_DOMAIN_UNMANAGED) {
+	case IOMMU_DOMAIN_UNMANAGED:
 		pgtable = AMD_IOMMU_V1;
-	} else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
-		pgtable = amd_iommu_pgtable;
-	} else {
-		return NULL;
+		break;
+	default:
+		goto out_err;
 	}
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-	if (!domain)
-		return NULL;
-
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, mode);
+		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
 		break;
 	case AMD_IOMMU_V2:
 		ret = protection_domain_init_v2(domain);
 	if (ret)
 		return ret;
-	switch (domain->type) {
-	default:
-	case IOMMU_DOMAIN_DMA:
-	case IOMMU_DOMAIN_UNMANAGED:
-		ret = apple_dart_domain_add_streams(dart_domain, cfg);
-		if (ret)
-			return ret;
+	for_each_stream_map(i, cfg, stream_map)
+		apple_dart_setup_translation(dart_domain, stream_map);
+	return 0;
+}
-		for_each_stream_map(i, cfg, stream_map)
-			apple_dart_setup_translation(dart_domain, stream_map);
-		break;
-	case IOMMU_DOMAIN_BLOCKED:
-		for_each_stream_map(i, cfg, stream_map)
-			apple_dart_hw_disable_dma(stream_map);
-		break;
-	case IOMMU_DOMAIN_IDENTITY:
-		for_each_stream_map(i, cfg, stream_map)
-			apple_dart_hw_enable_bypass(stream_map);
-		break;
-	}
+static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
+					  struct device *dev)
+{
+	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+	struct apple_dart_stream_map *stream_map;
+	int i;
-	return ret;
+	if (!cfg->stream_maps[0].dart->supports_bypass)
+		return -EINVAL;
+
+	for_each_stream_map(i, cfg, stream_map)
+		apple_dart_hw_enable_bypass(stream_map);
+	return 0;
 }
+static const struct iommu_domain_ops apple_dart_identity_ops = {
+	.attach_dev = apple_dart_attach_dev_identity,
+};
+
+static struct iommu_domain apple_dart_identity_domain = {
+	.type = IOMMU_DOMAIN_IDENTITY,
+	.ops = &apple_dart_identity_ops,
+};
+
+static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
+					 struct device *dev)
+{
+	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+	struct apple_dart_stream_map *stream_map;
+	int i;
+
+	for_each_stream_map(i, cfg, stream_map)
+		apple_dart_hw_disable_dma(stream_map);
+	return 0;
+}
+
+static const struct iommu_domain_ops apple_dart_blocked_ops = {
+	.attach_dev = apple_dart_attach_dev_blocked,
+};
+
+static struct iommu_domain apple_dart_blocked_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &apple_dart_blocked_ops,
+};
+
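These static domains only take effect once the driver publishes them through its iommu_ops; the core then hands them out directly instead of calling domain_alloc(), as the __iommu_domain_alloc() hunk further down shows. A minimal sketch of that wiring, assuming the driver's existing apple_dart_iommu_ops table and eliding every other member:

static const struct iommu_ops apple_dart_iommu_ops = {
	.identity_domain = &apple_dart_identity_domain,
	.blocked_domain  = &apple_dart_blocked_domain,
	/* ... probe_device, domain allocation, etc. unchanged ... */
};
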
 static struct iommu_device *apple_dart_probe_device(struct device *dev)
 {
 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 	lockdep_assert_held(&group->mutex);
-	return ERR_PTR(-EINVAL);
+	/*
+	 * Allow legacy drivers to specify the domain that will be the default
+	 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM
+	 * domain. Do not use in new drivers.
+	 */
+	if (ops->default_domain) {
+		if (req_type)
+			return NULL;
+		return ops->default_domain;
+	}
+
 	if (req_type)
-		return __iommu_group_alloc_default_domain(bus, group, req_type);
+		return __iommu_group_alloc_default_domain(group, req_type);
 	/* The driver gave no guidance on what type to use, try the default */
-	dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
+	dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
 	if (dom)
 		return dom;
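The ops->default_domain branch above is intended for legacy drivers whose devices always live in one fixed domain. A hypothetical sketch (names invented for illustration, not taken from an existing driver) of what such a driver would declare so this path can return its domain without any allocation:

static int foo_platform_attach_dev(struct iommu_domain *domain,
				   struct device *dev)
{
	/* nothing to program: the hardware always uses its platform mapping */
	return 0;
}

static const struct iommu_domain_ops foo_platform_domain_ops = {
	.attach_dev = foo_platform_attach_dev,
};

static struct iommu_domain foo_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &foo_platform_domain_ops,
};

static const struct iommu_ops foo_iommu_ops = {
	.default_domain = &foo_platform_domain,
	/* ... */
};
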
 	lockdep_assert_held(&group->mutex);
+	/*
+	 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
+	 * identity_domain and it will automatically become their default
+	 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
+	 * Override the selection to IDENTITY.
+	 */
+	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+		static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
+				IS_ENABLED(CONFIG_IOMMU_DMA)));
+		driver_type = IOMMU_DOMAIN_IDENTITY;
+	}
+
 	for_each_group_device(group, gdev) {
-		unsigned int type = iommu_get_def_domain_type(gdev->dev);
-
-		if (best_type && type && best_type != type) {
-			if (target_type) {
-				dev_err_ratelimited(
-					gdev->dev,
-					"Device cannot be in %s domain\n",
-					iommu_domain_type_str(target_type));
+		driver_type = iommu_get_def_domain_type(group, gdev->dev,
+							driver_type);
+
+		if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
+			/*
+			 * No ARM32 using systems will set untrusted, it cannot
+			 * work.
+			 */
+			if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
 				return -1;
-			}
+			untrusted = gdev->dev;
+		}
+	}
-			dev_warn(
-				gdev->dev,
-				"Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
-				iommu_domain_type_str(type), dev_name(last_dev),
-				iommu_domain_type_str(best_type));
-			return 0;
+	/*
+	 * If the common dma ops are not selected in kconfig then we cannot use
+	 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
+	 * selected.
+	 */
+	if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
+		if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
+			return -1;
+		if (!driver_type)
+			driver_type = IOMMU_DOMAIN_IDENTITY;
+	}
+
+	if (untrusted) {
+		if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
+			dev_err_ratelimited(
+				untrusted,
+				"Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
+				group->id, iommu_domain_type_str(driver_type));
+			return -1;
 		}
-		if (!best_type)
-			best_type = type;
-		last_dev = gdev->dev;
+		driver_type = IOMMU_DOMAIN_DMA;
 	}
-	return best_type;
+
+	if (target_type) {
+		if (driver_type && target_type != driver_type)
+			return -1;
+		return target_type;
+	}
+	return driver_type;
 }
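The per-device hint consumed by the selection loop above ultimately comes from the driver's def_domain_type() callback, via iommu_get_def_domain_type(). A sketch of such a callback, with the driver name and device property invented for illustration:

static int foo_def_domain_type(struct device *dev)
{
	/*
	 * Hypothetical quirk: the device is already doing DMA at probe time
	 * and must stay identity mapped. Returning 0 means "no preference".
	 */
	if (device_property_read_bool(dev, "foo,requires-identity-mapping"))
		return IOMMU_DOMAIN_IDENTITY;
	return 0;
}
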
 static void iommu_group_do_probe_finalize(struct device *dev)
 	struct iommu_domain *domain;
 	unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;
-	if (bus == NULL || bus->iommu_ops == NULL)
+	if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
+		return ops->identity_domain;
+	else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain)
+		return ops->blocked_domain;
+	else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging)
+		domain = ops->domain_alloc_paging(dev);
+	else if (ops->domain_alloc)
+		domain = ops->domain_alloc(alloc_type);
+	else
 		return NULL;
-	domain = bus->iommu_ops->domain_alloc(alloc_type);
 	if (!domain)
 		return NULL;
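The new dispatch prefers the per-device domain_alloc_paging() op for paging domains and only falls back to the older type-based domain_alloc(). A driver-side sketch of the newer op, with all names hypothetical; the core fills in domain->type after the call returns:

struct foo_domain {
	struct iommu_domain domain;
	/* driver page-table state would live here */
};

static struct iommu_domain *foo_domain_alloc_paging(struct device *dev)
{
	struct foo_domain *fdom = kzalloc(sizeof(*fdom), GFP_KERNEL);

	if (!fdom)
		return NULL;
	/* allocate page tables sized for 'dev' here */
	return &fdom->domain;
}
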
 		return -EINVAL;
 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
-	if (ret == 0 && ops->iotlb_sync_map)
-		ops->iotlb_sync_map(domain, iova, size);
+	if (ret == 0 && ops->iotlb_sync_map) {
+		ret = ops->iotlb_sync_map(domain, iova, size);
+		if (ret)
+			goto out_err;
+	}
+
+	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_map);
+
-static size_t __iommu_unmap_pages(struct iommu_domain *domain,
-				  unsigned long iova, size_t size,
-				  struct iommu_iotlb_gather *iotlb_gather)
-{
-	const struct iommu_domain_ops *ops = domain->ops;
-	size_t pgsize, count;
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, size);
-	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
-	return ops->unmap_pages ?
-	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
-	       ops->unmap(domain, iova, pgsize, iotlb_gather);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_map);
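With iotlb_sync_map() now returning int, iommu_map() unwinds the freshly created mappings on failure instead of leaving the IOTLB silently out of sync, and callers of iommu_map() see the error. A driver-side sketch of a sync callback that can fail, where foo_hw_sync_range() is a placeholder for whatever command-queue interface the hardware has:

static int foo_iotlb_sync_map(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	/*
	 * foo_hw_sync_range() is a stand-in: it queues a "make
	 * [iova, iova + size) visible" command and may fail with -ENOMEM.
	 */
	return foo_hw_sync_range(domain, iova, size);
}
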
 static size_t __iommu_unmap(struct iommu_domain *domain,
 			    unsigned long iova, size_t size,