--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ … @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "fullflush", 9) == 0) {
 			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
-			iommu_set_dma_strict(true);
+			iommu_set_dma_strict();
 		}
 		if (strncmp(str, "force_enable", 12) == 0)
 			amd_iommu_force_enable = true;
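The hunk above sits in parse_amd_iommu_options(), which walks the option string one character at a time and prefix-matches each token with strncmp(), so amd_iommu=fullflush still takes effect and only gains the deprecation warning. A minimal userspace sketch of that matching loop; parse() and the printf() bodies are illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <string.h>

/* Stand-in for parse_amd_iommu_options(): the cursor advances one
 * byte at a time, so a token matches wherever it appears in the
 * comma-separated option string.
 */
static void parse(const char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			printf("fullflush -> iommu_set_dma_strict()\n");
		if (strncmp(str, "force_enable", 12) == 0)
			printf("force_enable -> amd_iommu_force_enable\n");
	}
}

int main(void)
{
	parse("fullflush,force_enable");
	return 0;
}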
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ … @@ static int __init intel_iommu_setup(char *str)
 			iommu_dma_forcedac = true;
 		} else if (!strncmp(str, "strict", 6)) {
 			pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
-			iommu_set_dma_strict(true);
+			iommu_set_dma_strict();
 		} else if (!strncmp(str, "sp_off", 6)) {
 			pr_info("Disable supported super page\n");
 			intel_iommu_superpage = 0;
@@ … @@ int __init intel_iommu_init(void)
 		 */
 		if (cap_caching_mode(iommu->cap)) {
 			pr_info_once("IOMMU batching disallowed due to virtualization\n");
-			iommu_set_dma_strict(true);
+			iommu_set_dma_strict();
 		}
 		iommu_device_sysfs_add(&iommu->iommu, NULL,
 				       intel_iommu_groups,
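This hunk only fires under virtualization: the Caching Mode (CM) bit of the VT-d capability register is set by emulated IOMMUs, which must observe every invalidation synchronously, so batched (deferred) IOTLB flushing is ruled out and strict mode is forced unconditionally. A sketch of the test, assuming only the VT-d definition of CM as bit 7 of the capability register; the _sketch name is invented:

/* CM set means the (virtual) IOMMU may cache not-present entries,
 * so every unmap must be followed by an immediate invalidation.
 */
static inline int cap_caching_mode_sketch(unsigned long long cap)
{
	return (cap >> 7) & 1;
}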
@@ … @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 	} else if (dmar_map_gfx) {
 		/* we have to ensure the gfx device is idle before we flush */
 		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
-		iommu_set_dma_strict(true);
+		iommu_set_dma_strict();
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
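DECLARE_PCI_FIXUP_HEADER() arranges for the quirk to run as soon as the PCI core reads the matching device's config header, before any driver binds, which is why the fixup can adjust the flush policy this early. A hedged sketch of how such a fixup is declared, with quirk_example() as an invented hook and the device ID taken from the line above:

#include <linux/pci.h>

/* Invented hook: runs once per matching device at header fixup
 * time, i.e. before driver probe.
 */
static void quirk_example(struct pci_dev *dev)
{
	pci_info(dev, "header fixup for %04x:%04x\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_example);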
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ … @@ static int __init iommu_dma_setup(char *str)
 }
 early_param("iommu.strict", iommu_dma_setup);
 
-void iommu_set_dma_strict(bool strict)
+void iommu_set_dma_strict(void)
 {
-	if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
-		iommu_dma_strict = strict;
+	iommu_dma_strict = true;
 }
 
 bool iommu_get_dma_strict(struct iommu_domain *domain)
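This is the heart of the change. The old body honoured strict == false only when the user had not already set iommu.strict on the command line (the IOMMU_CMD_LINE_STRICT guard), while true always took effect; since every call site converted in this patch passes true, both the argument and the guard can go, leaving a one-way switch that can tighten but never relax the policy. A hypothetical caller after the change; example_init() and broken_batching are invented names, not kernel symbols:

#include <linux/iommu.h>

/* Hypothetical driver-init fragment: hardware that cannot batch
 * IOTLB flushes simply tightens the global policy; there is no
 * way to pass 'false' anymore.
 */
static void example_init(bool broken_batching)
{
	if (broken_batching)
		iommu_set_dma_strict();	/* was: iommu_set_dma_strict(true) */
}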
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ … @@
 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
 		unsigned long quirks);
 
-void iommu_set_dma_strict(bool val);
+void iommu_set_dma_strict(void);
 bool iommu_get_dma_strict(struct iommu_domain *domain);
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
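For reference, the replacement interface named in both deprecation warnings is the cross-driver boot parameter documented in kernel-parameters.txt: iommu.strict=1 selects strict mode, where the IOTLB is invalidated synchronously on every unmap, while iommu.strict=0 selects lazy mode, where invalidations are batched and deferred for performance.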