iommu/amd: Move AMD IOMMU driver into subdirectory
author     Joerg Roedel <jroedel@suse.de>
Tue, 9 Jun 2020 13:03:02 +0000 (15:03 +0200)
committer  Joerg Roedel <jroedel@suse.de>
Wed, 10 Jun 2020 15:46:42 +0000 (17:46 +0200)
Move all files related to the AMD IOMMU driver into its own
subdirectory.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20200609130303.26974-2-joro@8bytes.org
16 files changed:
MAINTAINERS
drivers/iommu/Makefile
drivers/iommu/amd/amd_iommu.h [new file with mode: 0644]
drivers/iommu/amd/amd_iommu_types.h [new file with mode: 0644]
drivers/iommu/amd/debugfs.c [new file with mode: 0644]
drivers/iommu/amd/init.c [new file with mode: 0644]
drivers/iommu/amd/iommu.c [new file with mode: 0644]
drivers/iommu/amd/iommu_v2.c [new file with mode: 0644]
drivers/iommu/amd/quirks.c [new file with mode: 0644]
drivers/iommu/amd_iommu.c [deleted file]
drivers/iommu/amd_iommu.h [deleted file]
drivers/iommu/amd_iommu_debugfs.c [deleted file]
drivers/iommu/amd_iommu_init.c [deleted file]
drivers/iommu/amd_iommu_quirks.c [deleted file]
drivers/iommu/amd_iommu_types.h [deleted file]
drivers/iommu/amd_iommu_v2.c [deleted file]

index 50659d76976b712dffa0e14e287f2e142235116a..dd59ec6676d9bdd3951cf2ddd320640adc957fe3 100644 (file)
@@ -876,7 +876,7 @@ M:  Joerg Roedel <joro@8bytes.org>
 L:     iommu@lists.linux-foundation.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-F:     drivers/iommu/amd_iommu*.[ch]
+F:     drivers/iommu/amd/
 F:     include/linux/amd-iommu.h
 
 AMD KFD
index 57cf4ba5e27cb30689d846a376bd74ff72e6c4e3..3af7e374b0cba7e695c092aa4b54e9e1c600746b 100644 (file)
@@ -11,9 +11,9 @@ obj-$(CONFIG_IOASID) += ioasid.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
-obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_AMD_IOMMU) += amd/iommu.o amd/init.o amd/quirks.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd/debugfs.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd/iommu_v2.o
 obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
 arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
new file mode 100644 (file)
index 0000000..f892992
--- /dev/null
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
+
+#include <linux/iommu.h>
+
+#include "amd_iommu_types.h"
+
+extern int amd_iommu_get_num_iommus(void);
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
+extern int amd_iommu_init_api(void);
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
+/* Needed for interrupt remapping */
+extern int amd_iommu_prepare(void);
+extern int amd_iommu_enable(void);
+extern void amd_iommu_disable(void);
+extern int amd_iommu_reenable(int);
+extern int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+                               u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+                                    unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#ifdef CONFIG_IRQ_REMAP
+extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+       return 0;
+}
+#endif
+
+#define PPR_SUCCESS                    0x0
+#define PPR_INVALID                    0x1
+#define PPR_FAILURE                    0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+                                 int status, int tag);
+
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+       if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+               return false;
+
+       return !!(iommu->features & f);
+}
+
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+       return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+       return phys_to_virt(__sme_clr(paddr));
+}
+
+extern bool translation_pre_enabled(struct amd_iommu *iommu);
+extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+                                        struct device *dev);
+extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+                                    bool cmd_line);
+
+#ifdef CONFIG_DMI
+void amd_iommu_apply_ivrs_quirks(void);
+#else
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
+#endif
+
+#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
new file mode 100644 (file)
index 0000000..30a5d41
--- /dev/null
@@ -0,0 +1,907 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ *         Leo Duran <leo.duran@amd.com>
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
+#define _ASM_X86_AMD_IOMMU_TYPES_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msi.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/irqreturn.h>
+
+/*
+ * Maximum number of IOMMUs supported
+ */
+#define MAX_IOMMUS     32
+
+/*
+ * some size calculation constants
+ */
+#define DEV_TABLE_ENTRY_SIZE           32
+#define ALIAS_TABLE_ENTRY_SIZE         2
+#define RLOOKUP_TABLE_ENTRY_SIZE       (sizeof(void *))
+
+/* Capability offsets used by the driver */
+#define MMIO_CAP_HDR_OFFSET    0x00
+#define MMIO_RANGE_OFFSET      0x0c
+#define MMIO_MISC_OFFSET       0x10
+
+/* Masks, shifts and macros to parse the device range capability */
+#define MMIO_RANGE_LD_MASK     0xff000000
+#define MMIO_RANGE_FD_MASK     0x00ff0000
+#define MMIO_RANGE_BUS_MASK    0x0000ff00
+#define MMIO_RANGE_LD_SHIFT    24
+#define MMIO_RANGE_FD_SHIFT    16
+#define MMIO_RANGE_BUS_SHIFT   8
+#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
+#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
+#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x)        ((x) & 0x1f)
+
+/* Flag masks for the AMD IOMMU exclusion range */
+#define MMIO_EXCL_ENABLE_MASK 0x01ULL
+#define MMIO_EXCL_ALLOW_MASK  0x02ULL
+
+/* Used offsets into the MMIO space */
+#define MMIO_DEV_TABLE_OFFSET   0x0000
+#define MMIO_CMD_BUF_OFFSET     0x0008
+#define MMIO_EVT_BUF_OFFSET     0x0010
+#define MMIO_CONTROL_OFFSET     0x0018
+#define MMIO_EXCL_BASE_OFFSET   0x0020
+#define MMIO_EXCL_LIMIT_OFFSET  0x0028
+#define MMIO_EXT_FEATURES      0x0030
+#define MMIO_PPR_LOG_OFFSET    0x0038
+#define MMIO_GA_LOG_BASE_OFFSET        0x00e0
+#define MMIO_GA_LOG_TAIL_OFFSET        0x00e8
+#define MMIO_MSI_ADDR_LO_OFFSET        0x015C
+#define MMIO_MSI_ADDR_HI_OFFSET        0x0160
+#define MMIO_MSI_DATA_OFFSET   0x0164
+#define MMIO_INTCAPXT_EVT_OFFSET       0x0170
+#define MMIO_INTCAPXT_PPR_OFFSET       0x0178
+#define MMIO_INTCAPXT_GALOG_OFFSET     0x0180
+#define MMIO_CMD_HEAD_OFFSET   0x2000
+#define MMIO_CMD_TAIL_OFFSET   0x2008
+#define MMIO_EVT_HEAD_OFFSET   0x2010
+#define MMIO_EVT_TAIL_OFFSET   0x2018
+#define MMIO_STATUS_OFFSET     0x2020
+#define MMIO_PPR_HEAD_OFFSET   0x2030
+#define MMIO_PPR_TAIL_OFFSET   0x2038
+#define MMIO_GA_HEAD_OFFSET    0x2040
+#define MMIO_GA_TAIL_OFFSET    0x2048
+#define MMIO_CNTR_CONF_OFFSET  0x4000
+#define MMIO_CNTR_REG_OFFSET   0x40000
+#define MMIO_REG_END_OFFSET    0x80000
+
+
+
+/* Extended Feature Bits */
+#define FEATURE_PREFETCH       (1ULL<<0)
+#define FEATURE_PPR            (1ULL<<1)
+#define FEATURE_X2APIC         (1ULL<<2)
+#define FEATURE_NX             (1ULL<<3)
+#define FEATURE_GT             (1ULL<<4)
+#define FEATURE_IA             (1ULL<<6)
+#define FEATURE_GA             (1ULL<<7)
+#define FEATURE_HE             (1ULL<<8)
+#define FEATURE_PC             (1ULL<<9)
+#define FEATURE_GAM_VAPIC      (1ULL<<21)
+#define FEATURE_EPHSUP         (1ULL<<50)
+
+#define FEATURE_PASID_SHIFT    32
+#define FEATURE_PASID_MASK     (0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT   14
+#define FEATURE_GLXVAL_MASK    (0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+/* Note:
+ * The current driver only supports 16-bit PASIDs.
+ * Current hardware implements at most 16-bit PASIDs, even though
+ * the spec allows up to 20 bits.
+ */
+#define PASID_MASK             0x0000ffff
+
+/* MMIO status bits */
+#define MMIO_STATUS_EVT_INT_MASK       (1 << 1)
+#define MMIO_STATUS_COM_WAIT_INT_MASK  (1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK       (1 << 6)
+#define MMIO_STATUS_GALOG_RUN_MASK     (1 << 8)
+#define MMIO_STATUS_GALOG_OVERFLOW_MASK        (1 << 9)
+#define MMIO_STATUS_GALOG_INT_MASK     (1 << 10)
+
+/* event logging constants */
+#define EVENT_ENTRY_SIZE       0x10
+#define EVENT_TYPE_SHIFT       28
+#define EVENT_TYPE_MASK                0xf
+#define EVENT_TYPE_ILL_DEV     0x1
+#define EVENT_TYPE_IO_FAULT    0x2
+#define EVENT_TYPE_DEV_TAB_ERR 0x3
+#define EVENT_TYPE_PAGE_TAB_ERR        0x4
+#define EVENT_TYPE_ILL_CMD     0x5
+#define EVENT_TYPE_CMD_HARD_ERR        0x6
+#define EVENT_TYPE_IOTLB_INV_TO        0x7
+#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_TYPE_INV_PPR_REQ 0x9
+#define EVENT_DEVID_MASK       0xffff
+#define EVENT_DEVID_SHIFT      0
+#define EVENT_DOMID_MASK_LO    0xffff
+#define EVENT_DOMID_MASK_HI    0xf0000
+#define EVENT_FLAGS_MASK       0xfff
+#define EVENT_FLAGS_SHIFT      0x10
+
+/* feature control bits */
+#define CONTROL_IOMMU_EN        0x00ULL
+#define CONTROL_HT_TUN_EN       0x01ULL
+#define CONTROL_EVT_LOG_EN      0x02ULL
+#define CONTROL_EVT_INT_EN      0x03ULL
+#define CONTROL_COMWAIT_EN      0x04ULL
+#define CONTROL_INV_TIMEOUT    0x05ULL
+#define CONTROL_PASSPW_EN       0x08ULL
+#define CONTROL_RESPASSPW_EN    0x09ULL
+#define CONTROL_COHERENT_EN     0x0aULL
+#define CONTROL_ISOC_EN         0x0bULL
+#define CONTROL_CMDBUF_EN       0x0cULL
+#define CONTROL_PPRLOG_EN       0x0dULL
+#define CONTROL_PPRINT_EN       0x0eULL
+#define CONTROL_PPR_EN          0x0fULL
+#define CONTROL_GT_EN           0x10ULL
+#define CONTROL_GA_EN           0x11ULL
+#define CONTROL_GAM_EN          0x19ULL
+#define CONTROL_GALOG_EN        0x1CULL
+#define CONTROL_GAINT_EN        0x1DULL
+#define CONTROL_XT_EN           0x32ULL
+#define CONTROL_INTCAPXT_EN     0x33ULL
+
+#define CTRL_INV_TO_MASK       (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE       0
+#define CTRL_INV_TO_1MS                1
+#define CTRL_INV_TO_10MS       2
+#define CTRL_INV_TO_100MS      3
+#define CTRL_INV_TO_1S         4
+#define CTRL_INV_TO_10S                5
+#define CTRL_INV_TO_100S       6
+
+/* command specific defines */
+#define CMD_COMPL_WAIT          0x01
+#define CMD_INV_DEV_ENTRY       0x02
+#define CMD_INV_IOMMU_PAGES    0x03
+#define CMD_INV_IOTLB_PAGES    0x04
+#define CMD_INV_IRT            0x05
+#define CMD_COMPLETE_PPR       0x07
+#define CMD_INV_ALL            0x08
+
+#define CMD_COMPL_WAIT_STORE_MASK      0x01
+#define CMD_COMPL_WAIT_INT_MASK                0x02
+#define CMD_INV_IOMMU_PAGES_SIZE_MASK  0x01
+#define CMD_INV_IOMMU_PAGES_PDE_MASK   0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK    0x04
+
+#define PPR_STATUS_MASK                        0xf
+#define PPR_STATUS_SHIFT               12
+
+#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS        0x7fffffffffffffffULL
+
+/* macros and definitions for device table entries */
+#define DEV_ENTRY_VALID         0x00
+#define DEV_ENTRY_TRANSLATION   0x01
+#define DEV_ENTRY_PPR           0x34
+#define DEV_ENTRY_IR            0x3d
+#define DEV_ENTRY_IW            0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT        0x62
+#define DEV_ENTRY_EX            0x67
+#define DEV_ENTRY_SYSMGT1       0x68
+#define DEV_ENTRY_SYSMGT2       0x69
+#define DEV_ENTRY_IRQ_TBL_EN   0x80
+#define DEV_ENTRY_INIT_PASS     0xb8
+#define DEV_ENTRY_EINT_PASS     0xb9
+#define DEV_ENTRY_NMI_PASS      0xba
+#define DEV_ENTRY_LINT0_PASS    0xbe
+#define DEV_ENTRY_LINT1_PASS    0xbf
+#define DEV_ENTRY_MODE_MASK    0x07
+#define DEV_ENTRY_MODE_SHIFT   0x09
+
+#define MAX_DEV_TABLE_ENTRIES  0xffff
+
+/* constants to configure the command buffer */
+#define CMD_BUFFER_SIZE    8192
+#define CMD_BUFFER_UNINITIALIZED 1
+#define CMD_BUFFER_ENTRIES 512
+#define MMIO_CMD_SIZE_SHIFT 56
+#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE                8192 /* 512 entries */
+#define EVT_LEN_MASK           (0x9ULL << 56)
+
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES                512
+#define PPR_LOG_SIZE_SHIFT     56
+#define PPR_LOG_SIZE_512       (0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE         16
+#define PPR_LOG_SIZE           (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x)                (((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x)           (((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x)           ((x) & 0xffffULL)
+#define PPR_TAG(x)             (((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x)          (((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x)          (((x) >> 42) & 0xfULL)
+#define PPR_PASID(x)           ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
+
+#define PPR_REQ_FAULT          0x01
+
+/* Constants for GA Log handling */
+#define GA_LOG_ENTRIES         512
+#define GA_LOG_SIZE_SHIFT      56
+#define GA_LOG_SIZE_512                (0x8ULL << GA_LOG_SIZE_SHIFT)
+#define GA_ENTRY_SIZE          8
+#define GA_LOG_SIZE            (GA_ENTRY_SIZE * GA_LOG_ENTRIES)
+
+#define GA_TAG(x)              (u32)(x & 0xffffffffULL)
+#define GA_DEVID(x)            (u16)(((x) >> 32) & 0xffffULL)
+#define GA_REQ_TYPE(x)         (((x) >> 60) & 0xfULL)
+
+#define GA_GUEST_NR            0x1
+
+/* Bit value definitions for DTE IRQ remapping fields */
+#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
+#define DTE_IRQ_REMAP_INTCTL_MASK      (0x3ULL << 60)
+#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
+#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
+#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
+#define DTE_IRQ_REMAP_ENABLE    1ULL
+
+#define PAGE_MODE_NONE    0x00
+#define PAGE_MODE_1_LEVEL 0x01
+#define PAGE_MODE_2_LEVEL 0x02
+#define PAGE_MODE_3_LEVEL 0x03
+#define PAGE_MODE_4_LEVEL 0x04
+#define PAGE_MODE_5_LEVEL 0x05
+#define PAGE_MODE_6_LEVEL 0x06
+#define PAGE_MODE_7_LEVEL 0x07
+
+#define PM_LEVEL_SHIFT(x)      (12 + ((x) * 9))
+#define PM_LEVEL_SIZE(x)       (((x) < 6) ? \
+                                 ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
+                                  (0xffffffffffffffffULL))
+#define PM_LEVEL_INDEX(x, a)   (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
+#define PM_LEVEL_ENC(x)                (((x) << 9) & 0xe00ULL)
+#define PM_LEVEL_PDE(x, a)     ((a) | PM_LEVEL_ENC((x)) | \
+                                IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte)      (((pte) >> 9) & 0x7ULL)
+
+#define PM_MAP_4k              0
+#define PM_ADDR_MASK           0x000ffffffffff000ULL
+#define PM_MAP_MASK(lvl)       (PM_ADDR_MASK & \
+                               (~((1ULL << (12 + ((lvl) * 9))) - 1)))
+#define PM_ALIGNED(lvl, addr)  ((PM_MAP_MASK(lvl) & (addr)) == (addr))
+
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+               ((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+               (1ULL << ((__ffs(pagesize) - 12) % 9))
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+               ((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address and a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize)               \
+               (((address) | ((pagesize) - 1)) &       \
+                (~(pagesize >> 1)) & PM_ADDR_MASK)
+
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+       (1ULL << (1 + ffz(((pte) | 0xfffULL))))
+
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level)                     \
+       (1ULL << (12 + (9 * (level))))
+
+/*
+ * Bit value definition for I/O PTE fields
+ */
+#define IOMMU_PTE_PR (1ULL << 0)
+#define IOMMU_PTE_U  (1ULL << 59)
+#define IOMMU_PTE_FC (1ULL << 60)
+#define IOMMU_PTE_IR (1ULL << 61)
+#define IOMMU_PTE_IW (1ULL << 62)
+
+/*
+ * Bit value definition for DTE fields
+ */
+#define DTE_FLAG_V  (1ULL << 0)
+#define DTE_FLAG_TV (1ULL << 1)
+#define DTE_FLAG_IR (1ULL << 61)
+#define DTE_FLAG_IW (1ULL << 62)
+
+#define DTE_FLAG_IOTLB (1ULL << 32)
+#define DTE_FLAG_GV    (1ULL << 55)
+#define DTE_FLAG_MASK  (0x3ffULL << 32)
+#define DTE_GLX_SHIFT  (56)
+#define DTE_GLX_MASK   (3)
+#define DEV_DOMID_MASK 0xffffULL
+
+#define DTE_GCR3_VAL_A(x)      (((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x)      (((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x)      (((x) >> 31) & 0x1fffffULL)
+
+#define DTE_GCR3_INDEX_A       0
+#define DTE_GCR3_INDEX_B       1
+#define DTE_GCR3_INDEX_C       1
+
+#define DTE_GCR3_SHIFT_A       58
+#define DTE_GCR3_SHIFT_B       16
+#define DTE_GCR3_SHIFT_C       43
+
+#define GCR3_VALID             0x01ULL
+
+#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
+#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
+#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
+
+#define IOMMU_PROT_MASK 0x03
+#define IOMMU_PROT_IR 0x01
+#define IOMMU_PROT_IW 0x02
+
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE        (1 << 2)
+
+/* IOMMU capabilities */
+#define IOMMU_CAP_IOTLB   24
+#define IOMMU_CAP_NPCACHE 26
+#define IOMMU_CAP_EFR     27
+
+/* IOMMU Feature Reporting Field (for IVHD type 10h) */
+#define IOMMU_FEAT_GASUP_SHIFT 6
+
+/* IOMMU Extended Feature Register (EFR) */
+#define IOMMU_EFR_XTSUP_SHIFT  2
+#define IOMMU_EFR_GASUP_SHIFT  7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT  46
+
+#define MAX_DOMAIN_ID 65536
+
+/* Protection domain flags */
+#define PD_DMA_OPS_MASK                (1UL << 0) /* domain used for dma_ops */
+#define PD_DEFAULT_MASK                (1UL << 1) /* domain is a default dma_ops
+                                             domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK    (1UL << 2) /* domain has no page
+                                             translation */
+#define PD_IOMMUV2_MASK                (1UL << 3) /* domain has gcr3 table */
+
+extern bool amd_iommu_dump;
+#define DUMP_printk(format, arg...)                            \
+       do {                                                    \
+               if (amd_iommu_dump)                             \
+                       pr_info("AMD-Vi: " format, ## arg);     \
+       } while(0);
+
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+/* Only true if all IOMMUs support device IOTLBs */
+extern bool amd_iommu_iotlb_sup;
+
+#define MAX_IRQS_PER_TABLE     256
+#define IRQ_TABLE_ALIGNMENT    128
+
+struct irq_remap_table {
+       raw_spinlock_t lock;
+       unsigned min_index;
+       u32 *table;
+};
+
+extern struct irq_remap_table **irq_lookup_table;
+
+/* Interrupt remapping feature used? */
+extern bool amd_iommu_irq_remap;
+
+/* kmem_cache to get tables with 128 byte alignment */
+extern struct kmem_cache *amd_iommu_irq_cache;
+
+/*
+ * Make iterating over all IOMMUs easier
+ */
+#define for_each_iommu(iommu) \
+       list_for_each_entry((iommu), &amd_iommu_list, list)
+#define for_each_iommu_safe(iommu, next) \
+       list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+
+#define APERTURE_RANGE_SHIFT   27      /* 128 MB */
+#define APERTURE_RANGE_SIZE    (1ULL << APERTURE_RANGE_SHIFT)
+#define APERTURE_RANGE_PAGES   (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
+#define APERTURE_MAX_RANGES    32      /* allows 4GB of DMA address space */
+#define APERTURE_RANGE_INDEX(a)        ((a) >> APERTURE_RANGE_SHIFT)
+#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+       u64 address;    /* IO virtual address of the fault */
+       u32 pasid;      /* Address space identifier */
+       u16 device_id;  /* Originating PCI device id */
+       u16 tag;        /* PPR tag */
+       u16 flags;      /* Fault flags */
+
+};
+
+
+struct iommu_domain;
+struct irq_domain;
+struct amd_irte_ops;
+
+#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED      (1 << 0)
+
+/*
+ * This structure contains generic data for  IOMMU protection domains
+ * independent of their use.
+ */
+struct protection_domain {
+       struct list_head dev_list; /* List of all devices in this domain */
+       struct iommu_domain domain; /* generic domain handle used by
+                                      iommu core code */
+       spinlock_t lock;        /* mostly used to lock the page table*/
+       u16 id;                 /* the domain id written to the device table */
+       atomic64_t pt_root;     /* pgtable root and pgtable mode */
+       int glx;                /* Number of levels for GCR3 table */
+       u64 *gcr3_tbl;          /* Guest CR3 table */
+       unsigned long flags;    /* flags to find out type of domain */
+       unsigned dev_cnt;       /* devices assigned to this domain */
+       unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+};
+
+/* For decoded pt_root */
+struct domain_pgtable {
+       int mode;
+       u64 *root;
+};
+
+/*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+ */
+struct amd_iommu {
+       struct list_head list;
+
+       /* Index within the IOMMU array */
+       int index;
+
+       /* locks the accesses to the hardware */
+       raw_spinlock_t lock;
+
+       /* Pointer to PCI device of this IOMMU */
+       struct pci_dev *dev;
+
+       /* Cache pdev to root device for resume quirks */
+       struct pci_dev *root_pdev;
+
+       /* physical address of MMIO space */
+       u64 mmio_phys;
+
+       /* physical end address of MMIO space */
+       u64 mmio_phys_end;
+
+       /* virtual address of MMIO space */
+       u8 __iomem *mmio_base;
+
+       /* capabilities of that IOMMU read from ACPI */
+       u32 cap;
+
+       /* flags read from acpi table */
+       u8 acpi_flags;
+
+       /* Extended features */
+       u64 features;
+
+       /* IOMMUv2 */
+       bool is_iommu_v2;
+
+       /* PCI device id of the IOMMU device */
+       u16 devid;
+
+       /*
+        * Capability pointer. There could be more than one IOMMU per PCI
+        * device function if there is more than one AMD IOMMU capability
+        * pointer.
+        */
+       u16 cap_ptr;
+
+       /* pci domain of this IOMMU */
+       u16 pci_seg;
+
+       /* start of exclusion range of that IOMMU */
+       u64 exclusion_start;
+       /* length of exclusion range of that IOMMU */
+       u64 exclusion_length;
+
+       /* command buffer virtual address */
+       u8 *cmd_buf;
+       u32 cmd_buf_head;
+       u32 cmd_buf_tail;
+
+       /* event buffer virtual address */
+       u8 *evt_buf;
+
+       /* Base of the PPR log, if present */
+       u8 *ppr_log;
+
+       /* Base of the GA log, if present */
+       u8 *ga_log;
+
+       /* Tail of the GA log, if present */
+       u8 *ga_log_tail;
+
+       /* true if interrupts for this IOMMU are already enabled */
+       bool int_enabled;
+
+       /* if one, we need to send a completion wait command */
+       bool need_sync;
+
+       /* Handle for IOMMU core code */
+       struct iommu_device iommu;
+
+       /*
+        * We can't rely on the BIOS to restore all values on reinit, so we
+        * need to stash them
+        */
+
+       /* The iommu BAR */
+       u32 stored_addr_lo;
+       u32 stored_addr_hi;
+
+       /*
+        * Each iommu has 6 l1s, each of which is documented as having 0x12
+        * registers
+        */
+       u32 stored_l1[6][0x12];
+
+       /* The l2 indirect registers */
+       u32 stored_l2[0x83];
+
+       /* The maximum PC banks and counters/bank (PCSup=1) */
+       u8 max_banks;
+       u8 max_counters;
+#ifdef CONFIG_IRQ_REMAP
+       struct irq_domain *ir_domain;
+       struct irq_domain *msi_domain;
+
+       struct amd_irte_ops *irte_ops;
+#endif
+
+       u32 flags;
+       volatile u64 __aligned(8) cmd_sem;
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+       /* DebugFS Info */
+       struct dentry *debugfs;
+#endif
+       /* IRQ notifier for IntCapXT interrupt */
+       struct irq_affinity_notify intcapxt_notify;
+};
+
+static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
+{
+       struct iommu_device *iommu = dev_to_iommu_device(dev);
+
+       return container_of(iommu, struct amd_iommu, iommu);
+}
+
+#define ACPIHID_UID_LEN 256
+#define ACPIHID_HID_LEN 9
+
+struct acpihid_map_entry {
+       struct list_head list;
+       u8 uid[ACPIHID_UID_LEN];
+       u8 hid[ACPIHID_HID_LEN];
+       u16 devid;
+       u16 root_devid;
+       bool cmd_line;
+       struct iommu_group *group;
+};
+
+struct devid_map {
+       struct list_head list;
+       u8 id;
+       u16 devid;
+       bool cmd_line;
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+       /* Protect against attach/detach races */
+       spinlock_t lock;
+
+       struct list_head list;            /* For domain->dev_list */
+       struct llist_node dev_data_list;  /* For global dev_data_list */
+       struct protection_domain *domain; /* Domain the device is bound to */
+       struct pci_dev *pdev;
+       u16 devid;                        /* PCI Device ID */
+       bool iommu_v2;                    /* Device can make use of IOMMUv2 */
+       struct {
+               bool enabled;
+               int qdep;
+       } ats;                            /* ATS state */
+       bool pri_tlp;                     /* PASID TLB required for
+                                            PPR completions */
+       u32 errata;                       /* Bitmap for errata to apply */
+       bool use_vapic;                   /* Enable device to use vapic mode */
+       bool defer_attach;
+
+       struct ratelimit_state rs;        /* Ratelimit IOPF messages */
+};
+
+/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
+extern struct list_head ioapic_map;
+extern struct list_head hpet_map;
+extern struct list_head acpihid_map;
+
+/*
+ * List with all IOMMUs in the system. This list is not locked because it is
+ * only written and read at driver initialization or suspend time
+ */
+extern struct list_head amd_iommu_list;
+
+/*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/*
+ * Structure defining one entry in the device table
+ */
+struct dev_table_entry {
+       u64 data[4];
+};
+
+/*
+ * One entry for unity mappings parsed out of the ACPI table.
+ */
+struct unity_map_entry {
+       struct list_head list;
+
+       /* starting device id this entry is used for (including) */
+       u16 devid_start;
+       /* end device id this entry is used for (including) */
+       u16 devid_end;
+
+       /* start address to unity map (including) */
+       u64 address_start;
+       /* end address to unity map (including) */
+       u64 address_end;
+
+       /* required protection */
+       int prot;
+};
+
+/*
+ * List of all unity mappings. It is not locked because at runtime it is only
+ * read. It is created at ACPI table parsing time.
+ */
+extern struct list_head amd_iommu_unity_map;
+
+/*
+ * Data structures for device handling
+ */
+
+/*
+ * Device table used by hardware. Read and write accesses by software are
+ * locked with the amd_iommu_pd_table lock.
+ */
+extern struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * Alias table to map requestor ids to device ids. It is not locked because
+ * it is only read at runtime.
+ */
+extern u16 *amd_iommu_alias_table;
+
+/*
+ * Reverse lookup table to find the IOMMU which translates a specific device.
+ */
+extern struct amd_iommu **amd_iommu_rlookup_table;
+
+/* size of the dma_ops aperture as power of 2 */
+extern unsigned amd_iommu_aperture_order;
+
+/* largest PCI device id we expect translation requests for */
+extern u16 amd_iommu_last_bdf;
+
+/* allocation bitmap for domain ids */
+extern unsigned long *amd_iommu_pd_alloc_bitmap;
+
+/*
+ * If true, the addresses will be flushed at unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
+/* Smallest max PASID supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasid;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
+static inline int get_ioapic_devid(int id)
+{
+       struct devid_map *entry;
+
+       list_for_each_entry(entry, &ioapic_map, list) {
+               if (entry->id == id)
+                       return entry->devid;
+       }
+
+       return -EINVAL;
+}
+
+static inline int get_hpet_devid(int id)
+{
+       struct devid_map *entry;
+
+       list_for_each_entry(entry, &hpet_map, list) {
+               if (entry->id == id)
+                       return entry->devid;
+       }
+
+       return -EINVAL;
+}
+
+enum amd_iommu_intr_mode_type {
+       AMD_IOMMU_GUEST_IR_LEGACY,
+
+       /* This mode is not visible to users. It is used when
+        * we cannot fully enable vAPIC and fall back to supporting only
+        * legacy interrupt remapping via 128-bit IRTEs.
+        */
+       AMD_IOMMU_GUEST_IR_LEGACY_GA,
+       AMD_IOMMU_GUEST_IR_VAPIC,
+};
+
+#define AMD_IOMMU_GUEST_IR_GA(x)       (x == AMD_IOMMU_GUEST_IR_VAPIC || \
+                                        x == AMD_IOMMU_GUEST_IR_LEGACY_GA)
+
+#define AMD_IOMMU_GUEST_IR_VAPIC(x)    (x == AMD_IOMMU_GUEST_IR_VAPIC)
+
+union irte {
+       u32 val;
+       struct {
+               u32 valid       : 1,
+                   no_fault    : 1,
+                   int_type    : 3,
+                   rq_eoi      : 1,
+                   dm          : 1,
+                   rsvd_1      : 1,
+                   destination : 8,
+                   vector      : 8,
+                   rsvd_2      : 8;
+       } fields;
+};
+
+#define APICID_TO_IRTE_DEST_LO(x)    (x & 0xffffff)
+#define APICID_TO_IRTE_DEST_HI(x)    ((x >> 24) & 0xff)
+
+union irte_ga_lo {
+       u64 val;
+
+       /* For int remapping */
+       struct {
+               u64 valid       : 1,
+                   no_fault    : 1,
+                   /* ------ */
+                   int_type    : 3,
+                   rq_eoi      : 1,
+                   dm          : 1,
+                   /* ------ */
+                   guest_mode  : 1,
+                   destination : 24,
+                   ga_tag      : 32;
+       } fields_remap;
+
+       /* For guest vAPIC */
+       struct {
+               u64 valid       : 1,
+                   no_fault    : 1,
+                   /* ------ */
+                   ga_log_intr : 1,
+                   rsvd1       : 3,
+                   is_run      : 1,
+                   /* ------ */
+                   guest_mode  : 1,
+                   destination : 24,
+                   ga_tag      : 32;
+       } fields_vapic;
+};
+
+union irte_ga_hi {
+       u64 val;
+       struct {
+               u64 vector      : 8,
+                   rsvd_1      : 4,
+                   ga_root_ptr : 40,
+                   rsvd_2      : 4,
+                   destination : 8;
+       } fields;
+};
+
+struct irte_ga {
+       union irte_ga_lo lo;
+       union irte_ga_hi hi;
+};
+
+struct irq_2_irte {
+       u16 devid; /* Device ID for IRTE table */
+       u16 index; /* Index into IRTE table */
+};
+
+struct amd_ir_data {
+       u32 cached_ga_tag;
+       struct irq_2_irte irq_2_irte;
+       struct msi_msg msi_entry;
+       void *entry;    /* Pointer to union irte or struct irte_ga */
+       void *ref;      /* Pointer to the actual irte */
+
+       /**
+        * Store information for activate/de-activate
+        * Guest virtual APIC mode during runtime.
+        */
+       struct irq_cfg *cfg;
+       int ga_vector;
+       int ga_root_ptr;
+       int ga_tag;
+};
+
+struct amd_irte_ops {
+       void (*prepare)(void *, u32, u32, u8, u32, int);
+       void (*activate)(void *, u16, u16);
+       void (*deactivate)(void *, u16, u16);
+       void (*set_affinity)(void *, u16, u16, u8, u32);
+       void *(*get)(struct irq_remap_table *, int);
+       void (*set_allocated)(struct irq_remap_table *, int);
+       bool (*is_allocated)(struct irq_remap_table *, int);
+       void (*clear_allocated)(struct irq_remap_table *, int);
+};
+
+#ifdef CONFIG_IRQ_REMAP
+extern struct amd_irte_ops irte_32_ops;
+extern struct amd_irte_ops irte_128_ops;
+#endif
+
+#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c
new file mode 100644 (file)
index 0000000..545372f
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD IOMMU driver
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+
+#include "amd_iommu.h"
+
+static struct dentry *amd_iommu_debugfs;
+static DEFINE_MUTEX(amd_iommu_debugfs_lock);
+
+#define        MAX_NAME_LEN    20
+
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+{
+       char name[MAX_NAME_LEN + 1];
+
+       mutex_lock(&amd_iommu_debugfs_lock);
+       if (!amd_iommu_debugfs)
+               amd_iommu_debugfs = debugfs_create_dir("amd",
+                                                      iommu_debugfs_dir);
+       mutex_unlock(&amd_iommu_debugfs_lock);
+
+       snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+       iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+}
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
new file mode 100644 (file)
index 0000000..6ebd482
--- /dev/null
@@ -0,0 +1,3174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ *         Leo Duran <leo.duran@amd.com>
+ */
+
+#define pr_fmt(fmt)     "AMD-Vi: " fmt
+#define dev_fmt(fmt)    pr_fmt(fmt)
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/amd-iommu.h>
+#include <linux/export.h>
+#include <linux/kmemleak.h>
+#include <linux/mem_encrypt.h>
+#include <asm/pci-direct.h>
+#include <asm/iommu.h>
+#include <asm/apic.h>
+#include <asm/msidef.h>
+#include <asm/gart.h>
+#include <asm/x86_init.h>
+#include <asm/iommu_table.h>
+#include <asm/io_apic.h>
+#include <asm/irq_remapping.h>
+
+#include <linux/crash_dump.h>
+
+#include "amd_iommu.h"
+#include "../irq_remapping.h"
+
+/*
+ * definitions for the ACPI scanning code
+ */
+#define IVRS_HEADER_LENGTH 48
+
+#define ACPI_IVHD_TYPE_MAX_SUPPORTED   0x40
+#define ACPI_IVMD_TYPE_ALL              0x20
+#define ACPI_IVMD_TYPE                  0x21
+#define ACPI_IVMD_TYPE_RANGE            0x22
+
+#define IVHD_DEV_ALL                    0x01
+#define IVHD_DEV_SELECT                 0x02
+#define IVHD_DEV_SELECT_RANGE_START     0x03
+#define IVHD_DEV_RANGE_END              0x04
+#define IVHD_DEV_ALIAS                  0x42
+#define IVHD_DEV_ALIAS_RANGE            0x43
+#define IVHD_DEV_EXT_SELECT             0x46
+#define IVHD_DEV_EXT_SELECT_RANGE       0x47
+#define IVHD_DEV_SPECIAL               0x48
+#define IVHD_DEV_ACPI_HID              0xf0
+
+#define UID_NOT_PRESENT                 0
+#define UID_IS_INTEGER                  1
+#define UID_IS_CHARACTER                2
+
+#define IVHD_SPECIAL_IOAPIC            1
+#define IVHD_SPECIAL_HPET              2
+
+#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
+#define IVHD_FLAG_PASSPW_EN_MASK        0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
+#define IVHD_FLAG_ISOC_EN_MASK          0x08
+
+#define IVMD_FLAG_EXCL_RANGE            0x08
+#define IVMD_FLAG_IW                    0x04
+#define IVMD_FLAG_IR                    0x02
+#define IVMD_FLAG_UNITY_MAP             0x01
+
+#define ACPI_DEVFLAG_INITPASS           0x01
+#define ACPI_DEVFLAG_EXTINT             0x02
+#define ACPI_DEVFLAG_NMI                0x04
+#define ACPI_DEVFLAG_SYSMGT1            0x10
+#define ACPI_DEVFLAG_SYSMGT2            0x20
+#define ACPI_DEVFLAG_LINT0              0x40
+#define ACPI_DEVFLAG_LINT1              0x80
+#define ACPI_DEVFLAG_ATSDIS             0x10000000
+
+#define LOOP_TIMEOUT   100000
+/*
+ * ACPI table definitions
+ *
+ * These data structures are laid over the table to parse the important values
+ * out of it.
+ */
+
+extern const struct iommu_ops amd_iommu_ops;
+
+/*
+ * Structure describing one IOMMU in the ACPI table. Typically followed by one
+ * or more ivhd_entry structures.
+ */
+struct ivhd_header {
+       u8 type;
+       u8 flags;
+       u16 length;
+       u16 devid;
+       u16 cap_ptr;
+       u64 mmio_phys;
+       u16 pci_seg;
+       u16 info;
+       u32 efr_attr;
+
+       /* Following only valid on IVHD type 11h and 40h */
+       u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
+       u64 res;
+} __attribute__((packed));
+
+/*
+ * A device entry describing which devices a specific IOMMU translates and
+ * which requestor ids they use.
+ */
+struct ivhd_entry {
+       u8 type;
+       u16 devid;
+       u8 flags;
+       u32 ext;
+       u32 hidh;
+       u64 cid;
+       u8 uidf;
+       u8 uidl;
+       u8 uid;
+} __attribute__((packed));
+
+/*
+ * An AMD IOMMU memory definition structure. It defines things like exclusion
+ * ranges for devices and regions that should be unity mapped.
+ */
+struct ivmd_header {
+       u8 type;
+       u8 flags;
+       u16 length;
+       u16 devid;
+       u16 aux;
+       u64 resv;
+       u64 range_start;
+       u64 range_length;
+} __attribute__((packed));
+
+bool amd_iommu_dump;
+bool amd_iommu_irq_remap __read_mostly;
+
+int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+
+static bool amd_iommu_detected;
+static bool __initdata amd_iommu_disabled;
+static int amd_iommu_target_ivhd_type;
+
+u16 amd_iommu_last_bdf;                        /* largest PCI device id we have
+                                          to handle */
+LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
+                                          we find in ACPI */
+bool amd_iommu_unmap_flush;            /* if true, flush on every unmap */
+
+LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
+                                          system */
+
+/* Array to assign indices to IOMMUs */
+struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+static int amd_iommus_present;
+
+/* IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+bool amd_iommu_iotlb_sup __read_mostly = true;
+
+u32 amd_iommu_max_pasid __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
+/*
+ * Pointer to the device table which is shared by all AMD IOMMUs.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+struct dev_table_entry *amd_iommu_dev_table;
+/*
+ * Pointer to a device table to which the content of the old device table
+ * will be copied. It is only used in a kdump kernel.
+ */
+static struct dev_table_entry *old_dev_tbl_cpy;
+
+/*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+u16 *amd_iommu_alias_table;
+
+/*
+ * The rlookup table is used to find the IOMMU which is responsible
+ * for a specific device. It is also indexed by the PCI device id.
+ */
+struct amd_iommu **amd_iommu_rlookup_table;
+EXPORT_SYMBOL(amd_iommu_rlookup_table);
+
+/*
+ * This table is used to find the irq remapping table for a given device id
+ * quickly.
+ */
+struct irq_remap_table **irq_lookup_table;
+
+/*
+ * The AMD IOMMU allows up to 2^16 different protection domains. This bitmap
+ * tracks which ones are already in use.
+ */
+unsigned long *amd_iommu_pd_alloc_bitmap;
+
+static u32 dev_table_size;     /* size of the device table */
+static u32 alias_table_size;   /* size of the alias table */
+static u32 rlookup_table_size; /* size of the rlookup table */
+
+enum iommu_init_state {
+       IOMMU_START_STATE,
+       IOMMU_IVRS_DETECTED,
+       IOMMU_ACPI_FINISHED,
+       IOMMU_ENABLED,
+       IOMMU_PCI_INIT,
+       IOMMU_INTERRUPTS_EN,
+       IOMMU_DMA_OPS,
+       IOMMU_INITIALIZED,
+       IOMMU_NOT_FOUND,
+       IOMMU_INIT_ERROR,
+       IOMMU_CMDLINE_DISABLED,
+};
+
+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE         4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
+
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static int __initdata early_acpihid_map_size;
+
+static bool __initdata cmdline_maps;
+
+static enum iommu_init_state init_state = IOMMU_START_STATE;
+
+static int amd_iommu_enable_interrupts(void);
+static int __init iommu_go_to_state(enum iommu_init_state state);
+static void init_device_table_dma(void);
+
+static bool amd_iommu_pre_enabled = true;
+
+bool translation_pre_enabled(struct amd_iommu *iommu)
+{
+       return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
+}
+EXPORT_SYMBOL(translation_pre_enabled);
+
+static void clear_translation_pre_enabled(struct amd_iommu *iommu)
+{
+       iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
+}
+
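+/* Record whether translation was already enabled by the firmware or a previous kernel */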
+static void init_translation_status(struct amd_iommu *iommu)
+{
+       u64 ctrl;
+
+       ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+       if (ctrl & (1<<CONTROL_IOMMU_EN))
+               iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
+}
+
+static inline void update_last_devid(u16 devid)
+{
+       if (devid > amd_iommu_last_bdf)
+               amd_iommu_last_bdf = devid;
+}
+
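+/* Size of a per-device-id table, rounded up to a power-of-two number of pages */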
+static inline unsigned long tbl_size(int entry_size)
+{
+       unsigned shift = PAGE_SHIFT +
+                        get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+
+       return 1UL << shift;
+}
+
+int amd_iommu_get_num_iommus(void)
+{
+       return amd_iommus_present;
+}
+
+/* Access to l1 and l2 indexed register spaces */
+
+static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
+{
+       u32 val;
+
+       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+       pci_read_config_dword(iommu->dev, 0xfc, &val);
+       return val;
+}
+
+static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
+{
+       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
+       pci_write_config_dword(iommu->dev, 0xfc, val);
+       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+}
+
+static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
+{
+       u32 val;
+
+       pci_write_config_dword(iommu->dev, 0xf0, address);
+       pci_read_config_dword(iommu->dev, 0xf4, &val);
+       return val;
+}
+
+static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
+{
+       pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
+       pci_write_config_dword(iommu->dev, 0xf4, val);
+}
+
+/****************************************************************************
+ *
+ * AMD IOMMU MMIO register space handling functions
+ *
+ * These functions are used to program the IOMMU device registers in
+ * MMIO space required by this driver.
+ *
+ ****************************************************************************/
+
+/*
+ * This function sets the exclusion range in the IOMMU. DMA accesses to the
+ * exclusion range are passed through untranslated.
+ */
+static void iommu_set_exclusion_range(struct amd_iommu *iommu)
+{
+       u64 start = iommu->exclusion_start & PAGE_MASK;
+       u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
+       u64 entry;
+
+       if (!iommu->exclusion_start)
+               return;
+
+       entry = start | MMIO_EXCL_ENABLE_MASK;
+       memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
+                       &entry, sizeof(entry));
+
+       entry = limit;
+       memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
+                       &entry, sizeof(entry));
+}
+
+/* Programs the physical address of the device table into the IOMMU hardware */
+static void iommu_set_device_table(struct amd_iommu *iommu)
+{
+       u64 entry;
+
+       BUG_ON(iommu->mmio_base == NULL);
+
+       entry = iommu_virt_to_phys(amd_iommu_dev_table);
+       entry |= (dev_table_size >> 12) - 1;
+       memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
+                       &entry, sizeof(entry));
+}
+
+/* Generic functions to enable/disable certain features of the IOMMU. */
+static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+{
+       u64 ctrl;
+
+       ctrl = readq(iommu->mmio_base +  MMIO_CONTROL_OFFSET);
+       ctrl |= (1ULL << bit);
+       writeq(ctrl, iommu->mmio_base +  MMIO_CONTROL_OFFSET);
+}
+
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+{
+       u64 ctrl;
+
+       ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+       ctrl &= ~(1ULL << bit);
+       writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
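+/* Program the invalidation timeout field of the IOMMU control register */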
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+       u64 ctrl;
+
+       ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+       ctrl &= ~CTRL_INV_TO_MASK;
+       ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+       writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
+/* Function to enable the hardware */
+static void iommu_enable(struct amd_iommu *iommu)
+{
+       iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
+}
+
+static void iommu_disable(struct amd_iommu *iommu)
+{
+       if (!iommu->mmio_base)
+               return;
+
+       /* Disable command buffer */
+       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+       /* Disable event logging and event interrupts */
+       iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+       /* Disable IOMMU GA_LOG */
+       iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+       iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+       /* Disable IOMMU hardware itself */
+       iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
+}
+
+/*
+ * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
+ * the system has one.
+ */
+static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
+{
+       if (!request_mem_region(address, end, "amd_iommu")) {
+               pr_err("Can not reserve memory region %llx-%llx for mmio\n",
+                       address, end);
+               pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
+               return NULL;
+       }
+
+       return (u8 __iomem *)ioremap(address, end);
+}
+
+static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
+{
+       if (iommu->mmio_base)
+               iounmap(iommu->mmio_base);
+       release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
+}
+
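+/* Length of the fixed IVHD header, which depends on the IVHD type (10h vs. 11h/40h) */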
+static inline u32 get_ivhd_header_size(struct ivhd_header *h)
+{
+       u32 size = 0;
+
+       switch (h->type) {
+       case 0x10:
+               size = 24;
+               break;
+       case 0x11:
+       case 0x40:
+               size = 40;
+               break;
+       }
+       return size;
+}
+
+/****************************************************************************
+ *
+ * The functions below belong to the first pass of AMD IOMMU ACPI table
+ * parsing. In this pass we try to find out the highest device id this
+ * code has to handle. Based on this information, the size of the shared data
+ * structures is determined later.
+ *
+ ****************************************************************************/
+
+/*
+ * This function calculates the length of a given IVHD entry
+ */
+static inline int ivhd_entry_length(u8 *ivhd)
+{
+       u32 type = ((struct ivhd_entry *)ivhd)->type;
+
+       if (type < 0x80) {
+               return 0x04 << (*ivhd >> 6);
+       } else if (type == IVHD_DEV_ACPI_HID) {
+               /* For ACPI_HID, offset 21 is uid len */
+               return *((u8 *)ivhd + 21) + 22;
+       }
+       return 0;
+}
+
+/*
+ * After reading the highest device id from the IOMMU PCI capability header
+ * this function checks whether a higher device id is defined in the ACPI table.
+ */
+static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
+{
+       u8 *p = (void *)h, *end = (void *)h;
+       struct ivhd_entry *dev;
+
+       u32 ivhd_size = get_ivhd_header_size(h);
+
+       if (!ivhd_size) {
+               pr_err("Unsupported IVHD type %#x\n", h->type);
+               return -EINVAL;
+       }
+
+       p += ivhd_size;
+       end += h->length;
+
+       while (p < end) {
+               dev = (struct ivhd_entry *)p;
+               switch (dev->type) {
+               case IVHD_DEV_ALL:
+                       /* Use maximum BDF value for DEV_ALL */
+                       update_last_devid(0xffff);
+                       break;
+               case IVHD_DEV_SELECT:
+               case IVHD_DEV_RANGE_END:
+               case IVHD_DEV_ALIAS:
+               case IVHD_DEV_EXT_SELECT:
+                       /* all the above subfield types refer to device ids */
+                       update_last_devid(dev->devid);
+                       break;
+               default:
+                       break;
+               }
+               p += ivhd_entry_length(p);
+       }
+
+       WARN_ON(p != end);
+
+       return 0;
+}
+
+static int __init check_ivrs_checksum(struct acpi_table_header *table)
+{
+       int i;
+       u8 checksum = 0, *p = (u8 *)table;
+
+       for (i = 0; i < table->length; ++i)
+               checksum += p[i];
+       if (checksum != 0) {
+               /* ACPI table corrupt */
+               pr_err(FW_BUG "IVRS invalid checksum\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/*
+ * Iterate over all IVHD entries in the ACPI table and find the highest device
+ * id which we need to handle. This is the first of three functions which parse
+ * the ACPI table. So we check the checksum here.
+ */
+static int __init find_last_devid_acpi(struct acpi_table_header *table)
+{
+       u8 *p = (u8 *)table, *end = (u8 *)table;
+       struct ivhd_header *h;
+
+       p += IVRS_HEADER_LENGTH;
+
+       end += table->length;
+       while (p < end) {
+               h = (struct ivhd_header *)p;
+               if (h->type == amd_iommu_target_ivhd_type) {
+                       int ret = find_last_devid_from_ivhd(h);
+
+                       if (ret)
+                               return ret;
+               }
+               p += h->length;
+       }
+       WARN_ON(p != end);
+
+       return 0;
+}
+
+/****************************************************************************
+ *
+ * The following functions belong to the code path which parses the ACPI table
+ * the second time. In this ACPI parsing iteration we allocate IOMMU specific
+ * data structures, initialize the device/alias/rlookup table and also
+ * basically initialize the hardware.
+ *
+ ****************************************************************************/
+
+/*
+ * Allocates the command buffer. This buffer is per AMD IOMMU. We can
+ * write commands to that buffer later and the IOMMU will execute them
+ * asynchronously.
+ */
+static int __init alloc_command_buffer(struct amd_iommu *iommu)
+{
+       iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                 get_order(CMD_BUFFER_SIZE));
+
+       return iommu->cmd_buf ? 0 : -ENOMEM;
+}
+
+/*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+       writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+       iommu->cmd_buf_head = 0;
+       iommu->cmd_buf_tail = 0;
+
+       iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
+/*
+ * This function writes the command buffer address to the hardware and
+ * enables it.
+ */
+static void iommu_enable_command_buffer(struct amd_iommu *iommu)
+{
+       u64 entry;
+
+       BUG_ON(iommu->cmd_buf == NULL);
+
+       entry = iommu_virt_to_phys(iommu->cmd_buf);
+       entry |= MMIO_CMD_SIZE_512;
+
+       memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
+                   &entry, sizeof(entry));
+
+       amd_iommu_reset_cmd_buffer(iommu);
+}
+
+/*
+ * This function disables the command buffer
+ */
+static void iommu_disable_command_buffer(struct amd_iommu *iommu)
+{
+       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+}
+
+static void __init free_command_buffer(struct amd_iommu *iommu)
+{
+       free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+}
+
+/* allocates the memory where the IOMMU will log its events to */
+static int __init alloc_event_buffer(struct amd_iommu *iommu)
+{
+       iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                 get_order(EVT_BUFFER_SIZE));
+
+       return iommu->evt_buf ? 0 : -ENOMEM;
+}
+
+static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+{
+       u64 entry;
+
+       BUG_ON(iommu->evt_buf == NULL);
+
+       entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+
+       memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+                   &entry, sizeof(entry));
+
+       /* set head and tail to zero manually */
+       writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+       iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+/*
+ * This function disables the event log buffer
+ */
+static void iommu_disable_event_buffer(struct amd_iommu *iommu)
+{
+       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+static void __init free_event_buffer(struct amd_iommu *iommu)
+{
+       free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+}
+
+/* allocates the memory where the IOMMU will log peripheral page requests to */
+static int __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+       iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                 get_order(PPR_LOG_SIZE));
+
+       return iommu->ppr_log ? 0 : -ENOMEM;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+       u64 entry;
+
+       if (iommu->ppr_log == NULL)
+               return;
+
+       entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+       memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+                   &entry, sizeof(entry));
+
+       /* set head and tail to zero manually */
+       writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+       iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
+       iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+       if (iommu->ppr_log == NULL)
+               return;
+
+       free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void free_ga_log(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+       if (iommu->ga_log)
+               free_pages((unsigned long)iommu->ga_log,
+                           get_order(GA_LOG_SIZE));
+       if (iommu->ga_log_tail)
+               free_pages((unsigned long)iommu->ga_log_tail,
+                           get_order(8));
+#endif
+}
+
+static int iommu_ga_log_enable(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+       u32 status, i;
+
+       if (!iommu->ga_log)
+               return -EINVAL;
+
+       status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+       /* Check if already running */
+       if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+               return 0;
+
+       iommu_feature_enable(iommu, CONTROL_GAINT_EN);
+       iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+
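+       /* Wait for the hardware to report that the GA log is running */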
+       for (i = 0; i < LOOP_TIMEOUT; ++i) {
+               status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+               if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+                       break;
+       }
+
+       if (i >= LOOP_TIMEOUT)
+               return -EINVAL;
+#endif /* CONFIG_IRQ_REMAP */
+       return 0;
+}
+
+#ifdef CONFIG_IRQ_REMAP
+static int iommu_init_ga_log(struct amd_iommu *iommu)
+{
+       u64 entry;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+               return 0;
+
+       iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                       get_order(GA_LOG_SIZE));
+       if (!iommu->ga_log)
+               goto err_out;
+
+       iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                       get_order(8));
+       if (!iommu->ga_log_tail)
+               goto err_out;
+
+       entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+       memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+                   &entry, sizeof(entry));
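+       /* Mask the tail address to 52 bits and keep it 8-byte aligned */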
+       entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+                (BIT_ULL(52)-1)) & ~7ULL;
+       memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+                   &entry, sizeof(entry));
+       writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+       return 0;
+err_out:
+       free_ga_log(iommu);
+       return -EINVAL;
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+static int iommu_init_ga(struct amd_iommu *iommu)
+{
+       int ret = 0;
+
+#ifdef CONFIG_IRQ_REMAP
+       /* Note: We have already checked GASup from IVRS table.
+        *       Now, we need to make sure that GAMSup is set.
+        */
+       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+           !iommu_feature(iommu, FEATURE_GAM_VAPIC))
+               amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
+       ret = iommu_init_ga_log(iommu);
+#endif /* CONFIG_IRQ_REMAP */
+
+       return ret;
+}
+
+static void iommu_enable_xt(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+       /*
+        * XT mode (32-bit APIC destination ID) requires
+        * GA mode (128-bit IRTE support) as a prerequisite.
+        */
+       if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
+           amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+               iommu_feature_enable(iommu, CONTROL_XT_EN);
+#endif /* CONFIG_IRQ_REMAP */
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+       if (!iommu_feature(iommu, FEATURE_GT))
+               return;
+
+       iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
+/* sets a specific bit in the device table entry. */
+static void set_dev_entry_bit(u16 devid, u8 bit)
+{
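+       /* Each DTE is four 64-bit words; pick the word and the bit within it */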
+       int i = (bit >> 6) & 0x03;
+       int _bit = bit & 0x3f;
+
+       amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
+}
+
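+/* Reads a specific bit from the device table entry. */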
+static int get_dev_entry_bit(u16 devid, u8 bit)
+{
+       int i = (bit >> 6) & 0x03;
+       int _bit = bit & 0x3f;
+
+       return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
+}
+
+
+static bool copy_device_table(void)
+{
+       u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
+       struct dev_table_entry *old_devtb = NULL;
+       u32 lo, hi, devid, old_devtb_size;
+       phys_addr_t old_devtb_phys;
+       struct amd_iommu *iommu;
+       u16 dom_id, dte_v, irq_v;
+       gfp_t gfp_flag;
+       u64 tmp;
+
+       if (!amd_iommu_pre_enabled)
+               return false;
+
+       pr_warn("Translation is already enabled - trying to copy translation structures\n");
+       for_each_iommu(iommu) {
+               /* All IOMMUs should use the same device table with the same size */
+               lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+               hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+               entry = (((u64) hi) << 32) + lo;
+               if (last_entry && last_entry != entry) {
+                       pr_err("IOMMU:%d should use the same dev table as others!\n",
+                               iommu->index);
+                       return false;
+               }
+               last_entry = entry;
+
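+               /* The size field encodes the table size in 4K pages, minus one */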
+               old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+               if (old_devtb_size != dev_table_size) {
+                       pr_err("The device table size of IOMMU:%d is not expected!\n",
+                               iommu->index);
+                       return false;
+               }
+       }
+
+       /*
+        * When SME is enabled in the first kernel, the entry includes the
+        * memory encryption mask (sme_me_mask). We must remove the mask to
+        * obtain the true physical address in the kdump kernel.
+        */
+       old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
+
+       if (old_devtb_phys >= 0x100000000ULL) {
+               pr_err("The address of old device table is above 4G, not trustworthy!\n");
+               return false;
+       }
+       old_devtb = (sme_active() && is_kdump_kernel())
+                   ? (__force void *)ioremap_encrypted(old_devtb_phys,
+                                                       dev_table_size)
+                   : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+
+       if (!old_devtb)
+               return false;
+
+       gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
+       old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+                               get_order(dev_table_size));
+       if (old_dev_tbl_cpy == NULL) {
+               pr_err("Failed to allocate memory for copying old device table!\n");
+               return false;
+       }
+
+       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+               old_dev_tbl_cpy[devid] = old_devtb[devid];
+               dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
+               dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
+
+               if (dte_v && dom_id) {
+                       old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+                       old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+                       __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
+                       /* If gcr3 table existed, mask it out */
+                       if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
+                               tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+                               tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+                               old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+                               tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
+                               tmp |= DTE_FLAG_GV;
+                               old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+                       }
+               }
+
+               irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
+               int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
+               int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
+               if (irq_v && (int_ctl || int_tab_len)) {
+                       if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
+                           (int_tab_len != DTE_IRQ_TABLE_LEN)) {
+                               pr_err("Wrong old irq remapping flag: %#x\n", devid);
+                               return false;
+                       }
+
+                       old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+               }
+       }
+       memunmap(old_devtb);
+
+       return true;
+}
+
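+/*
+ * Erratum 63 workaround: if the DTE SysMgt field is set to 01b, the IW bit
+ * must also be set in the device table entry.
+ */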
+void amd_iommu_apply_erratum_63(u16 devid)
+{
+       int sysmgt;
+
+       sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
+                (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+
+       if (sysmgt == 0x01)
+               set_dev_entry_bit(devid, DEV_ENTRY_IW);
+}
+
+/* Writes the specific IOMMU for a device into the rlookup table */
+static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+{
+       amd_iommu_rlookup_table[devid] = iommu;
+}
+
+/*
+ * This function takes the device specific flags read from the ACPI
+ * table and sets up the device table entry with that information
+ */
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+                                          u16 devid, u32 flags, u32 ext_flags)
+{
+       if (flags & ACPI_DEVFLAG_INITPASS)
+               set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
+       if (flags & ACPI_DEVFLAG_EXTINT)
+               set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
+       if (flags & ACPI_DEVFLAG_NMI)
+               set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
+       if (flags & ACPI_DEVFLAG_SYSMGT1)
+               set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
+       if (flags & ACPI_DEVFLAG_SYSMGT2)
+               set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
+       if (flags & ACPI_DEVFLAG_LINT0)
+               set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
+       if (flags & ACPI_DEVFLAG_LINT1)
+               set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+
+       amd_iommu_apply_erratum_63(devid);
+
+       set_iommu_for_device(iommu, devid);
+}
+
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+{
+       struct devid_map *entry;
+       struct list_head *list;
+
+       if (type == IVHD_SPECIAL_IOAPIC)
+               list = &ioapic_map;
+       else if (type == IVHD_SPECIAL_HPET)
+               list = &hpet_map;
+       else
+               return -EINVAL;
+
+       list_for_each_entry(entry, list, list) {
+               if (!(entry->id == id && entry->cmd_line))
+                       continue;
+
+               pr_info("Command-line override present for %s id %d - ignoring\n",
+                       type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+               *devid = entry->devid;
+
+               return 0;
+       }
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->id       = id;
+       entry->devid    = *devid;
+       entry->cmd_line = cmd_line;
+
+       list_add_tail(&entry->list, list);
+
+       return 0;
+}
+
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+                                     bool cmd_line)
+{
+       struct acpihid_map_entry *entry;
+       struct list_head *list = &acpihid_map;
+
+       list_for_each_entry(entry, list, list) {
+               if (strcmp(entry->hid, hid) ||
+                   (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
+                   !entry->cmd_line)
+                       continue;
+
+               pr_info("Command-line override for hid:%s uid:%s\n",
+                       hid, uid);
+               *devid = entry->devid;
+               return 0;
+       }
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       memcpy(entry->uid, uid, strlen(uid));
+       memcpy(entry->hid, hid, strlen(hid));
+       entry->devid = *devid;
+       entry->cmd_line = cmd_line;
+       entry->root_devid = (entry->devid & (~0x7));
+
+       pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
+               entry->cmd_line ? "cmd" : "ivrs",
+               entry->hid, entry->uid, entry->root_devid);
+
+       list_add_tail(&entry->list, list);
+       return 0;
+}
+
+static int __init add_early_maps(void)
+{
+       int i, ret;
+
+       for (i = 0; i < early_ioapic_map_size; ++i) {
+               ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+                                        early_ioapic_map[i].id,
+                                        &early_ioapic_map[i].devid,
+                                        early_ioapic_map[i].cmd_line);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < early_hpet_map_size; ++i) {
+               ret = add_special_device(IVHD_SPECIAL_HPET,
+                                        early_hpet_map[i].id,
+                                        &early_hpet_map[i].devid,
+                                        early_hpet_map[i].cmd_line);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < early_acpihid_map_size; ++i) {
+               ret = add_acpi_hid_device(early_acpihid_map[i].hid,
+                                         early_acpihid_map[i].uid,
+                                         &early_acpihid_map[i].devid,
+                                         early_acpihid_map[i].cmd_line);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
+ * it
+ */
+static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
+{
+       if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
+               return;
+
+       /*
+        * Treat per-device exclusion ranges as r/w unity-mapped regions
+        * since some buggy BIOSes overwrite the exclusion range
+        * (the exclusion_start and exclusion_length members) when multiple
+        * exclusion ranges (IVMD entries) are defined in the ACPI table.
+        */
+       m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
+}
+
+/*
+ * Takes a pointer to an AMD IOMMU entry in the ACPI table and
+ * initializes the hardware and our data structures with it.
+ */
+static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
+                                       struct ivhd_header *h)
+{
+       u8 *p = (u8 *)h;
+       u8 *end = p, flags = 0;
+       u16 devid = 0, devid_start = 0, devid_to = 0;
+       u32 dev_i, ext_flags = 0;
+       bool alias = false;
+       struct ivhd_entry *e;
+       u32 ivhd_size;
+       int ret;
+
+
+       ret = add_early_maps();
+       if (ret)
+               return ret;
+
+       amd_iommu_apply_ivrs_quirks();
+
+       /*
+        * First save the recommended feature enable bits from ACPI
+        */
+       iommu->acpi_flags = h->flags;
+
+       /*
+        * Done. Now parse the device entries
+        */
+       ivhd_size = get_ivhd_header_size(h);
+       if (!ivhd_size) {
+               pr_err("Unsupported IVHD type %#x\n", h->type);
+               return -EINVAL;
+       }
+
+       p += ivhd_size;
+
+       end += h->length;
+
+
+       while (p < end) {
+               e = (struct ivhd_entry *)p;
+               switch (e->type) {
+               case IVHD_DEV_ALL:
+
+                       DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
+
+                       for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+                               set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
+                       break;
+               case IVHD_DEV_SELECT:
+
+                       DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+                                   "flags: %02x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid),
+                                   e->flags);
+
+                       devid = e->devid;
+                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+                       break;
+               case IVHD_DEV_SELECT_RANGE_START:
+
+                       DUMP_printk("  DEV_SELECT_RANGE_START\t "
+                                   "devid: %02x:%02x.%x flags: %02x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid),
+                                   e->flags);
+
+                       devid_start = e->devid;
+                       flags = e->flags;
+                       ext_flags = 0;
+                       alias = false;
+                       break;
+               case IVHD_DEV_ALIAS:
+
+                       DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+                                   "flags: %02x devid_to: %02x:%02x.%x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid),
+                                   e->flags,
+                                   PCI_BUS_NUM(e->ext >> 8),
+                                   PCI_SLOT(e->ext >> 8),
+                                   PCI_FUNC(e->ext >> 8));
+
+                       devid = e->devid;
+                       devid_to = e->ext >> 8;
+                       set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
+                       set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
+                       amd_iommu_alias_table[devid] = devid_to;
+                       break;
+               case IVHD_DEV_ALIAS_RANGE:
+
+                       DUMP_printk("  DEV_ALIAS_RANGE\t\t "
+                                   "devid: %02x:%02x.%x flags: %02x "
+                                   "devid_to: %02x:%02x.%x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid),
+                                   e->flags,
+                                   PCI_BUS_NUM(e->ext >> 8),
+                                   PCI_SLOT(e->ext >> 8),
+                                   PCI_FUNC(e->ext >> 8));
+
+                       devid_start = e->devid;
+                       flags = e->flags;
+                       devid_to = e->ext >> 8;
+                       ext_flags = 0;
+                       alias = true;
+                       break;
+               case IVHD_DEV_EXT_SELECT:
+
+                       DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+                                   "flags: %02x ext: %08x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid),
+                                   e->flags, e->ext);
+
+                       devid = e->devid;
+                       set_dev_entry_from_acpi(iommu, devid, e->flags,
+                                               e->ext);
+                       break;
+               case IVHD_DEV_EXT_SELECT_RANGE:
+
+                       DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
+                                   "%02x:%02x.%x flags: %02x ext: %08x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid),
+                                   e->flags, e->ext);
+
+                       devid_start = e->devid;
+                       flags = e->flags;
+                       ext_flags = e->ext;
+                       alias = false;
+                       break;
+               case IVHD_DEV_RANGE_END:
+
+                       DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
+                                   PCI_BUS_NUM(e->devid),
+                                   PCI_SLOT(e->devid),
+                                   PCI_FUNC(e->devid));
+
+                       devid = e->devid;
+                       for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
+                               if (alias) {
+                                       amd_iommu_alias_table[dev_i] = devid_to;
+                                       set_dev_entry_from_acpi(iommu,
+                                               devid_to, flags, ext_flags);
+                               }
+                               set_dev_entry_from_acpi(iommu, dev_i,
+                                                       flags, ext_flags);
+                       }
+                       break;
+               case IVHD_DEV_SPECIAL: {
+                       u8 handle, type;
+                       const char *var;
+                       u16 devid;
+                       int ret;
+
+                       handle = e->ext & 0xff;
+                       devid  = (e->ext >>  8) & 0xffff;
+                       type   = (e->ext >> 24) & 0xff;
+
+                       if (type == IVHD_SPECIAL_IOAPIC)
+                               var = "IOAPIC";
+                       else if (type == IVHD_SPECIAL_HPET)
+                               var = "HPET";
+                       else
+                               var = "UNKNOWN";
+
+                       DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+                                   var, (int)handle,
+                                   PCI_BUS_NUM(devid),
+                                   PCI_SLOT(devid),
+                                   PCI_FUNC(devid));
+
+                       ret = add_special_device(type, handle, &devid, false);
+                       if (ret)
+                               return ret;
+
+                       /*
+                        * add_special_device might update the devid in case a
+                        * command-line override is present. So call
+                        * set_dev_entry_from_acpi after add_special_device.
+                        */
+                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
+                       break;
+               }
+               case IVHD_DEV_ACPI_HID: {
+                       u16 devid;
+                       u8 hid[ACPIHID_HID_LEN];
+                       u8 uid[ACPIHID_UID_LEN];
+                       int ret;
+
+                       if (h->type != 0x40) {
+                               pr_err(FW_BUG "Invalid IVHD device type %#x\n",
+                                      e->type);
+                               break;
+                       }
+
+                       memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
+                       hid[ACPIHID_HID_LEN - 1] = '\0';
+
+                       if (!(*hid)) {
+                               pr_err(FW_BUG "Invalid HID.\n");
+                               break;
+                       }
+
+                       uid[0] = '\0';
+                       switch (e->uidf) {
+                       case UID_NOT_PRESENT:
+
+                               if (e->uidl != 0)
+                                       pr_warn(FW_BUG "Invalid UID length.\n");
+
+                               break;
+                       case UID_IS_INTEGER:
+
+                               sprintf(uid, "%d", e->uid);
+
+                               break;
+                       case UID_IS_CHARACTER:
+
+                               memcpy(uid, &e->uid, e->uidl);
+                               uid[e->uidl] = '\0';
+
+                               break;
+                       default:
+                               break;
+                       }
+
+                       devid = e->devid;
+                       DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
+                                   hid, uid,
+                                   PCI_BUS_NUM(devid),
+                                   PCI_SLOT(devid),
+                                   PCI_FUNC(devid));
+
+                       flags = e->flags;
+
+                       ret = add_acpi_hid_device(hid, uid, &devid, false);
+                       if (ret)
+                               return ret;
+
+                       /*
+                        * add_acpi_hid_device might update the devid in case a
+                        * command-line override is present. So call
+                        * set_dev_entry_from_acpi after add_acpi_hid_device.
+                        */
+                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
+                       break;
+               }
+               default:
+                       break;
+               }
+
+               p += ivhd_entry_length(p);
+       }
+
+       return 0;
+}
+
+static void __init free_iommu_one(struct amd_iommu *iommu)
+{
+       free_command_buffer(iommu);
+       free_event_buffer(iommu);
+       free_ppr_log(iommu);
+       free_ga_log(iommu);
+       iommu_unmap_mmio_space(iommu);
+}
+
+static void __init free_iommu_all(void)
+{
+       struct amd_iommu *iommu, *next;
+
+       for_each_iommu_safe(iommu, next) {
+               list_del(&iommu->list);
+               free_iommu_one(iommu);
+               kfree(iommu);
+       }
+}
+
+/*
+ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
+ * Workaround:
+ *     BIOS should disable L2B miscellaneous clock gating by setting
+ *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
+ */
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+{
+       u32 value;
+
+       if ((boot_cpu_data.x86 != 0x15) ||
+           (boot_cpu_data.x86_model < 0x10) ||
+           (boot_cpu_data.x86_model > 0x1f))
+               return;
+
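+       /* Select NB indirect register 0x90 and read its current value */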
+       pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+       pci_read_config_dword(iommu->dev, 0xf4, &value);
+
+       if (value & BIT(2))
+               return;
+
+       /* Select NB indirect register 0x90 and enable writing */
+       pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
+
+       pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
+       pci_info(iommu->dev, "Applying erratum 746 workaround\n");
+
+       /* Clear the enable writing bit */
+       pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+}
+
+/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ *     BIOS should enable ATS write permission check by setting
+ *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+       u32 value;
+
+       if ((boot_cpu_data.x86 != 0x15) ||
+           (boot_cpu_data.x86_model < 0x30) ||
+           (boot_cpu_data.x86_model > 0x3f))
+               return;
+
+       /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+       value = iommu_read_l2(iommu, 0x47);
+
+       if (value & BIT(0))
+               return;
+
+       /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+       iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+       pci_info(iommu->dev, "Applying ATS write check workaround\n");
+}
+
+/*
+ * This function glues the initialization of one IOMMU together. It also
+ * allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+ */
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+{
+       int ret;
+
+       raw_spin_lock_init(&iommu->lock);
+
+       /* Add IOMMU to internal data structures */
+       list_add_tail(&iommu->list, &amd_iommu_list);
+       iommu->index = amd_iommus_present++;
+
+       if (unlikely(iommu->index >= MAX_IOMMUS)) {
+               WARN(1, "System has more IOMMUs than supported by this driver\n");
+               return -ENOSYS;
+       }
+
+       /* Index is fine - add IOMMU to the array */
+       amd_iommus[iommu->index] = iommu;
+
+       /*
+        * Copy data from ACPI table entry to the iommu struct
+        */
+       iommu->devid   = h->devid;
+       iommu->cap_ptr = h->cap_ptr;
+       iommu->pci_seg = h->pci_seg;
+       iommu->mmio_phys = h->mmio_phys;
+
+       switch (h->type) {
+       case 0x10:
+               /* Check if IVHD EFR contains proper max banks/counters */
+               if ((h->efr_attr != 0) &&
+                   ((h->efr_attr & (0xF << 13)) != 0) &&
+                   ((h->efr_attr & (0x3F << 17)) != 0))
+                       iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+               else
+                       iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+               if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+               break;
+       case 0x11:
+       case 0x40:
+               if (h->efr_reg & (1 << 9))
+                       iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+               else
+                       iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+               if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+               /*
+                * Note: Since iommu_update_intcapxt() leverages
+                * the IOMMU MMIO access to MSI capability block registers
+                * for MSI address lo/hi/data, we need to check both
+                * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
+                */
+               if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+                   (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+                       amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
+                                               iommu->mmio_phys_end);
+       if (!iommu->mmio_base)
+               return -ENOMEM;
+
+       if (alloc_command_buffer(iommu))
+               return -ENOMEM;
+
+       if (alloc_event_buffer(iommu))
+               return -ENOMEM;
+
+       iommu->int_enabled = false;
+
+       init_translation_status(iommu);
+       if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
+               iommu_disable(iommu);
+               clear_translation_pre_enabled(iommu);
+               pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
+                       iommu->index);
+       }
+       if (amd_iommu_pre_enabled)
+               amd_iommu_pre_enabled = translation_pre_enabled(iommu);
+
+       ret = init_iommu_from_acpi(iommu, h);
+       if (ret)
+               return ret;
+
+       ret = amd_iommu_create_irq_domain(iommu);
+       if (ret)
+               return ret;
+
+       /*
+        * Make sure IOMMU is not considered to translate itself. The IVRS
+        * table tells us so, but this is a lie!
+        */
+       amd_iommu_rlookup_table[iommu->devid] = NULL;
+
+       return 0;
+}
+
+/**
+ * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
+ * @ivrs: Pointer to the IVRS header
+ *
+ * This function searches through all IVHD entries and returns the highest
+ * supported IVHD type found.
+ */
+static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
+{
+       u8 *base = (u8 *)ivrs;
+       struct ivhd_header *ivhd = (struct ivhd_header *)
+                                       (base + IVRS_HEADER_LENGTH);
+       u8 last_type = ivhd->type;
+       u16 devid = ivhd->devid;
+
+       while (((u8 *)ivhd - base < ivrs->length) &&
+              (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
+               u8 *p = (u8 *) ivhd;
+
+               if (ivhd->devid == devid)
+                       last_type = ivhd->type;
+               ivhd = (struct ivhd_header *)(p + ivhd->length);
+       }
+
+       return last_type;
+}
+
+/*
+ * Iterates over all IOMMU entries in the ACPI table, allocates the
+ * IOMMU structure and initializes it with init_iommu_one()
+ */
+static int __init init_iommu_all(struct acpi_table_header *table)
+{
+       u8 *p = (u8 *)table, *end = (u8 *)table;
+       struct ivhd_header *h;
+       struct amd_iommu *iommu;
+       int ret;
+
+       end += table->length;
+       p += IVRS_HEADER_LENGTH;
+
+       while (p < end) {
+               h = (struct ivhd_header *)p;
+               if (*p == amd_iommu_target_ivhd_type) {
+
+                       DUMP_printk("device: %02x:%02x.%01x cap: %04x "
+                                   "seg: %d flags: %01x info %04x\n",
+                                   PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
+                                   PCI_FUNC(h->devid), h->cap_ptr,
+                                   h->pci_seg, h->flags, h->info);
+                       DUMP_printk("       mmio-addr: %016llx\n",
+                                   h->mmio_phys);
+
+                       iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
+                       if (iommu == NULL)
+                               return -ENOMEM;
+
+                       ret = init_iommu_one(iommu, h);
+                       if (ret)
+                               return ret;
+               }
+               p += h->length;
+
+       }
+       WARN_ON(p != end);
+
+       return 0;
+}
+
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+                               u8 fxn, u64 *value, bool is_write);
+
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+{
+       struct pci_dev *pdev = iommu->dev;
+       u64 val = 0xabcd, val2 = 0, save_reg = 0;
+
+       if (!iommu_feature(iommu, FEATURE_PC))
+               return;
+
+       amd_iommu_pc_present = true;
+
+       /* save the value to restore, if writable */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+               goto pc_false;
+
+       /* Check if the performance counters can be written to */
+       if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
+           (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
+           (val != val2))
+               goto pc_false;
+
+       /* restore */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+               goto pc_false;
+
+       pci_info(pdev, "IOMMU performance counters supported\n");
+
+       val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+       iommu->max_banks = (u8) ((val >> 12) & 0x3f);
+       iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+       return;
+
+pc_false:
+       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+       amd_iommu_pc_present = false;
+       return;
+}
+
+static ssize_t amd_iommu_show_cap(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       struct amd_iommu *iommu = dev_to_amd_iommu(dev);
+       return sprintf(buf, "%x\n", iommu->cap);
+}
+static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
+
+static ssize_t amd_iommu_show_features(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct amd_iommu *iommu = dev_to_amd_iommu(dev);
+       return sprintf(buf, "%llx\n", iommu->features);
+}
+static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
+
+static struct attribute *amd_iommu_attrs[] = {
+       &dev_attr_cap.attr,
+       &dev_attr_features.attr,
+       NULL,
+};
+
+static struct attribute_group amd_iommu_group = {
+       .name = "amd-iommu",
+       .attrs = amd_iommu_attrs,
+};
+
+static const struct attribute_group *amd_iommu_groups[] = {
+       &amd_iommu_group,
+       NULL,
+};
+
+static int __init iommu_init_pci(struct amd_iommu *iommu)
+{
+       int cap_ptr = iommu->cap_ptr;
+       int ret;
+
+       iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
+                                                iommu->devid & 0xff);
+       if (!iommu->dev)
+               return -ENODEV;
+
+       /* Prevent binding other PCI device drivers to IOMMU devices */
+       iommu->dev->match_driver = false;
+
+       pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+                             &iommu->cap);
+
+       if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+               amd_iommu_iotlb_sup = false;
+
+       /* read extended feature bits */
+       iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+       if (iommu_feature(iommu, FEATURE_GT)) {
+               int glxval;
+               u32 max_pasid;
+               u64 pasmax;
+
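+               /* PASmax encodes the supported PASID width, minus one */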
+               pasmax = iommu->features & FEATURE_PASID_MASK;
+               pasmax >>= FEATURE_PASID_SHIFT;
+               max_pasid  = (1 << (pasmax + 1)) - 1;
+
+               amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+
+               BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
+
+               glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+               glxval >>= FEATURE_GLXVAL_SHIFT;
+
+               if (amd_iommu_max_glx_val == -1)
+                       amd_iommu_max_glx_val = glxval;
+               else
+                       amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+       }
+
+       if (iommu_feature(iommu, FEATURE_GT) &&
+           iommu_feature(iommu, FEATURE_PPR)) {
+               iommu->is_iommu_v2   = true;
+               amd_iommu_v2_present = true;
+       }
+
+       if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+               return -ENOMEM;
+
+       ret = iommu_init_ga(iommu);
+       if (ret)
+               return ret;
+
+       if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+               amd_iommu_np_cache = true;
+
+       init_iommu_perf_ctr(iommu);
+
+       if (is_rd890_iommu(iommu->dev)) {
+               int i, j;
+
+               iommu->root_pdev =
+                       pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
+                                                   PCI_DEVFN(0, 0));
+
+               /*
+                * Some rd890 systems may not be fully reconfigured by the
+                * BIOS, so it's necessary for us to store this information so
+                * it can be reprogrammed on resume
+                */
+               pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                               &iommu->stored_addr_lo);
+               pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+                               &iommu->stored_addr_hi);
+
+               /* Low bit locks writes to configuration space */
+               iommu->stored_addr_lo &= ~1;
+
+               for (i = 0; i < 6; i++)
+                       for (j = 0; j < 0x12; j++)
+                               iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+               for (i = 0; i < 0x83; i++)
+                       iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+       }
+
+       amd_iommu_erratum_746_workaround(iommu);
+       amd_iommu_ats_write_check_workaround(iommu);
+
+       iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+                              amd_iommu_groups, "ivhd%d", iommu->index);
+       iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
+       iommu_device_register(&iommu->iommu);
+
+       return pci_enable_device(iommu->dev);
+}
+
+static void print_iommu_info(void)
+{
+       static const char * const feat_str[] = {
+               "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+               "IA", "GA", "HE", "PC"
+       };
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu) {
+               struct pci_dev *pdev = iommu->dev;
+               int i;
+
+               pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
+
+               if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+                       pci_info(pdev, "Extended features (%#llx):\n",
+                                iommu->features);
+                       for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+                               if (iommu_feature(iommu, (1ULL << i)))
+                                       pr_cont(" %s", feat_str[i]);
+                       }
+
+                       if (iommu->features & FEATURE_GAM_VAPIC)
+                               pr_cont(" GA_vAPIC");
+
+                       pr_cont("\n");
+               }
+       }
+       if (irq_remapping_enabled) {
+               pr_info("Interrupt remapping enabled\n");
+               if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+                       pr_info("Virtual APIC enabled\n");
+               if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+                       pr_info("X2APIC enabled\n");
+       }
+}
+
+static int __init amd_iommu_init_pci(void)
+{
+       struct amd_iommu *iommu;
+       int ret = 0;
+
+       for_each_iommu(iommu) {
+               ret = iommu_init_pci(iommu);
+               if (ret)
+                       break;
+       }
+
+       /*
+        * Order is important here to make sure any unity map requirements are
+        * fulfilled. The unity mappings are created and written to the device
+        * table during the amd_iommu_init_api() call.
+        *
+        * After that we call init_device_table_dma() to make sure any
+        * uninitialized DTE will block DMA, and in the end we flush the caches
+        * of all IOMMUs to make sure the changes to the device table are
+        * active.
+        */
+       ret = amd_iommu_init_api();
+
+       init_device_table_dma();
+
+       for_each_iommu(iommu)
+               iommu_flush_all_caches(iommu);
+
+       if (!ret)
+               print_iommu_info();
+
+       return ret;
+}
+
+/****************************************************************************
+ *
+ * The following functions initialize the MSI interrupts for all IOMMUs
+ * in the system. It's a bit challenging because there could be multiple
+ * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
+ * pci_dev.
+ *
+ ****************************************************************************/
+
+static int iommu_setup_msi(struct amd_iommu *iommu)
+{
+       int r;
+
+       r = pci_enable_msi(iommu->dev);
+       if (r)
+               return r;
+
+       r = request_threaded_irq(iommu->dev->irq,
+                                amd_iommu_int_handler,
+                                amd_iommu_int_thread,
+                                0, "AMD-Vi",
+                                iommu);
+
+       if (r) {
+               pci_disable_msi(iommu->dev);
+               return r;
+       }
+
+       iommu->int_enabled = true;
+
+       return 0;
+}
+
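+/*
+ * Helpers to assemble the 64-bit IntCapXT register value: interrupt
+ * vector, destination mode and the destination APIC ID (split into its
+ * low 24 bits and high 8 bits).
+ */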
+#define XT_INT_DEST_MODE(x)    (((x) & 0x1ULL) << 2)
+#define XT_INT_DEST_LO(x)      (((x) & 0xFFFFFFULL) << 8)
+#define XT_INT_VEC(x)          (((x) & 0xFFULL) << 32)
+#define XT_INT_DEST_HI(x)      ((((x) >> 24) & 0xFFULL) << 56)
+
+/**
+ * Set up the IntCapXT registers with interrupt routing information
+ * based on the PCI MSI capability block registers, accessed via
+ * MMIO MSI address low/hi and MSI data registers.
+ */
+static void iommu_update_intcapxt(struct amd_iommu *iommu)
+{
+       u64 val;
+       u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
+       u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
+       u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
+       bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
+       u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
+
+       if (x2apic_enabled())
+               dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
+
+       val = XT_INT_VEC(data & 0xFF) |
+             XT_INT_DEST_MODE(dm) |
+             XT_INT_DEST_LO(dest) |
+             XT_INT_DEST_HI(dest);
+
+       /**
+        * Current IOMMU implementation uses the same IRQ for all
+        * 3 IOMMU interrupts.
+        */
+       writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+       writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+       writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void _irq_notifier_notify(struct irq_affinity_notify *notify,
+                                const cpumask_t *mask)
+{
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu) {
+               if (iommu->dev->irq == notify->irq) {
+                       iommu_update_intcapxt(iommu);
+                       break;
+               }
+       }
+}
+
+static void _irq_notifier_release(struct kref *ref)
+{
+}
+
+static int iommu_init_intcapxt(struct amd_iommu *iommu)
+{
+       int ret;
+       struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
+
+       /**
+        * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+        * which can be inferred from amd_iommu_xt_mode.
+        */
+       if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
+               return 0;
+
+       /**
+        * Also, we need to setup notifier to update the IntCapXT registers
+        * whenever the irq affinity is changed from user-space.
+        */
+       notify->irq = iommu->dev->irq;
+       notify->notify = _irq_notifier_notify;
+       notify->release = _irq_notifier_release;
+       ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
+       if (ret) {
+               pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
+                      iommu->devid, iommu->dev->irq);
+               return ret;
+       }
+
+       iommu_update_intcapxt(iommu);
+       iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+       return ret;
+}
+
+static int iommu_init_msi(struct amd_iommu *iommu)
+{
+       int ret;
+
+       if (iommu->int_enabled)
+               goto enable_faults;
+
+       if (iommu->dev->msi_cap)
+               ret = iommu_setup_msi(iommu);
+       else
+               ret = -ENODEV;
+
+       if (ret)
+               return ret;
+
+enable_faults:
+       ret = iommu_init_intcapxt(iommu);
+       if (ret)
+               return ret;
+
+       iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+       if (iommu->ppr_log != NULL)
+               iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
+
+       iommu_ga_log_enable(iommu);
+
+       return 0;
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the third pass of parsing the ACPI
+ * table. In this last pass the memory mapping requirements are
+ * gathered (like exclusion and unity mapping ranges).
+ *
+ ****************************************************************************/
+
+static void __init free_unity_maps(void)
+{
+       struct unity_map_entry *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
+               list_del(&entry->list);
+               kfree(entry);
+       }
+}
+
+/* called when we find an exclusion range definition in ACPI */
+static int __init init_exclusion_range(struct ivmd_header *m)
+{
+       int i;
+
+       switch (m->type) {
+       case ACPI_IVMD_TYPE:
+               set_device_exclusion_range(m->devid, m);
+               break;
+       case ACPI_IVMD_TYPE_ALL:
+               for (i = 0; i <= amd_iommu_last_bdf; ++i)
+                       set_device_exclusion_range(i, m);
+               break;
+       case ACPI_IVMD_TYPE_RANGE:
+               for (i = m->devid; i <= m->aux; ++i)
+                       set_device_exclusion_range(i, m);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* called for unity map ACPI definition */
+static int __init init_unity_map_range(struct ivmd_header *m)
+{
+       struct unity_map_entry *e = NULL;
+       char *s;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (e == NULL)
+               return -ENOMEM;
+
+       if (m->flags & IVMD_FLAG_EXCL_RANGE)
+               init_exclusion_range(m);
+
+       switch (m->type) {
+       default:
+               kfree(e);
+               return 0;
+       case ACPI_IVMD_TYPE:
+               s = "IVMD_TYPE\t\t\t";
+               e->devid_start = e->devid_end = m->devid;
+               break;
+       case ACPI_IVMD_TYPE_ALL:
+               s = "IVMD_TYPE_ALL\t\t";
+               e->devid_start = 0;
+               e->devid_end = amd_iommu_last_bdf;
+               break;
+       case ACPI_IVMD_TYPE_RANGE:
+               s = "IVMD_TYPE_RANGE\t\t";
+               e->devid_start = m->devid;
+               e->devid_end = m->aux;
+               break;
+       }
+       e->address_start = PAGE_ALIGN(m->range_start);
+       e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
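+       /* Shift out the unity-map bit so IR/IW line up with IOMMU_PROT_* */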
+       e->prot = m->flags >> 1;
+
+       DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
+                   " range_start: %016llx range_end: %016llx flags: %x\n", s,
+                   PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
+                   PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
+                   PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
+                   e->address_start, e->address_end, m->flags);
+
+       list_add_tail(&e->list, &amd_iommu_unity_map);
+
+       return 0;
+}
+
+/* iterates over all memory definitions we find in the ACPI table */
+static int __init init_memory_definitions(struct acpi_table_header *table)
+{
+       u8 *p = (u8 *)table, *end = (u8 *)table;
+       struct ivmd_header *m;
+
+       end += table->length;
+       p += IVRS_HEADER_LENGTH;
+
+       while (p < end) {
+               m = (struct ivmd_header *)p;
+               if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
+                       init_unity_map_range(m);
+
+               p += m->length;
+       }
+
+       return 0;
+}
+
+/*
+ * Init the device table to not allow DMA access for devices
+ */
+static void init_device_table_dma(void)
+{
+       u32 devid;
+
+       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+               set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+               set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+       }
+}
+
+static void __init uninit_device_table_dma(void)
+{
+       u32 devid;
+
+       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+               amd_iommu_dev_table[devid].data[0] = 0ULL;
+               amd_iommu_dev_table[devid].data[1] = 0ULL;
+       }
+}
+
+static void init_device_table(void)
+{
+       u32 devid;
+
+       if (!amd_iommu_irq_remap)
+               return;
+
+       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+               set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+}
+
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+       iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+       /*
+        * make IOMMU memory accesses cache coherent
+        */
+       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+       /* Set IOTLB invalidation timeout to 1s */
+       iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+}
+
+static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+{
+       int i, j;
+       u32 ioc_feature_control;
+       struct pci_dev *pdev = iommu->root_pdev;
+
+       /* RD890 BIOSes may not have completely reconfigured the iommu */
+       if (!is_rd890_iommu(iommu->dev) || !pdev)
+               return;
+
+       /*
+        * First, we need to ensure that the iommu is enabled. This is
+        * controlled by a register in the northbridge
+        */
+
+       /* Select Northbridge indirect register 0x75 and enable writing */
+       pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
+       pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
+
+       /* Enable the iommu */
+       if (!(ioc_feature_control & 0x1))
+               pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
+
+       /* Restore the iommu BAR */
+       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                              iommu->stored_addr_lo);
+       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
+                              iommu->stored_addr_hi);
+
+       /* Restore the l1 indirect regs for each of the 6 l1s */
+       for (i = 0; i < 6; i++)
+               for (j = 0; j < 0x12; j++)
+                       iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
+
+       /* Restore the l2 indirect regs */
+       for (i = 0; i < 0x83; i++)
+               iommu_write_l2(iommu, i, iommu->stored_l2[i]);
+
+       /* Lock PCI setup registers */
+       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                              iommu->stored_addr_lo | 1);
+}
+
+static void iommu_enable_ga(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+       switch (amd_iommu_guest_ir) {
+       case AMD_IOMMU_GUEST_IR_VAPIC:
+               iommu_feature_enable(iommu, CONTROL_GAM_EN);
+               /* Fall through */
+       case AMD_IOMMU_GUEST_IR_LEGACY_GA:
+               iommu_feature_enable(iommu, CONTROL_GA_EN);
+               iommu->irte_ops = &irte_128_ops;
+               break;
+       default:
+               iommu->irte_ops = &irte_32_ops;
+               break;
+       }
+#endif
+}
+
+static void early_enable_iommu(struct amd_iommu *iommu)
+{
+       iommu_disable(iommu);
+       iommu_init_flags(iommu);
+       iommu_set_device_table(iommu);
+       iommu_enable_command_buffer(iommu);
+       iommu_enable_event_buffer(iommu);
+       iommu_set_exclusion_range(iommu);
+       iommu_enable_ga(iommu);
+       iommu_enable_xt(iommu);
+       iommu_enable(iommu);
+       iommu_flush_all_caches(iommu);
+}
+
+/*
+ * This function finally enables all IOMMUs found in the system after
+ * they have been initialized.
+ *
+ * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to copy
+ * the old content of the device table entries. If that is not the case, or
+ * the copy fails, just continue as a normal kernel does.
+ */
+static void early_enable_iommus(void)
+{
+       struct amd_iommu *iommu;
+
+
+       if (!copy_device_table()) {
+               /*
+                * If we get here because copying the device table from the
+                * old kernel failed even though all IOMMUs were pre-enabled,
+                * print an error message and free the allocated old_dev_tbl_cpy.
+                */
+               if (amd_iommu_pre_enabled)
+                       pr_err("Failed to copy DEV table from previous kernel.\n");
+               if (old_dev_tbl_cpy != NULL)
+                       free_pages((unsigned long)old_dev_tbl_cpy,
+                                       get_order(dev_table_size));
+
+               for_each_iommu(iommu) {
+                       clear_translation_pre_enabled(iommu);
+                       early_enable_iommu(iommu);
+               }
+       } else {
+               pr_info("Copied DEV table from previous kernel.\n");
+               free_pages((unsigned long)amd_iommu_dev_table,
+                               get_order(dev_table_size));
+               amd_iommu_dev_table = old_dev_tbl_cpy;
+               for_each_iommu(iommu) {
+                       iommu_disable_command_buffer(iommu);
+                       iommu_disable_event_buffer(iommu);
+                       iommu_enable_command_buffer(iommu);
+                       iommu_enable_event_buffer(iommu);
+                       iommu_enable_ga(iommu);
+                       iommu_enable_xt(iommu);
+                       iommu_set_device_table(iommu);
+                       iommu_flush_all_caches(iommu);
+               }
+       }
+
+#ifdef CONFIG_IRQ_REMAP
+       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+               amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+#endif
+}
+
+static void enable_iommus_v2(void)
+{
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu) {
+               iommu_enable_ppr_log(iommu);
+               iommu_enable_gt(iommu);
+       }
+}
+
+static void enable_iommus(void)
+{
+       early_enable_iommus();
+
+       enable_iommus_v2();
+}
+
+static void disable_iommus(void)
+{
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu)
+               iommu_disable(iommu);
+
+#ifdef CONFIG_IRQ_REMAP
+       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+               amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
+#endif
+}
+
+/*
+ * Suspend/Resume support
+ * disable suspend until real resume implemented
+ */
+
+static void amd_iommu_resume(void)
+{
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu)
+               iommu_apply_resume_quirks(iommu);
+
+       /* re-load the hardware */
+       enable_iommus();
+
+       amd_iommu_enable_interrupts();
+}
+
+static int amd_iommu_suspend(void)
+{
+       /* disable IOMMUs to go out of the way for BIOS */
+       disable_iommus();
+
+       return 0;
+}
+
+static struct syscore_ops amd_iommu_syscore_ops = {
+       .suspend = amd_iommu_suspend,
+       .resume = amd_iommu_resume,
+};
+
+static void __init free_iommu_resources(void)
+{
+       kmemleak_free(irq_lookup_table);
+       free_pages((unsigned long)irq_lookup_table,
+                  get_order(rlookup_table_size));
+       irq_lookup_table = NULL;
+
+       kmem_cache_destroy(amd_iommu_irq_cache);
+       amd_iommu_irq_cache = NULL;
+
+       free_pages((unsigned long)amd_iommu_rlookup_table,
+                  get_order(rlookup_table_size));
+       amd_iommu_rlookup_table = NULL;
+
+       free_pages((unsigned long)amd_iommu_alias_table,
+                  get_order(alias_table_size));
+       amd_iommu_alias_table = NULL;
+
+       free_pages((unsigned long)amd_iommu_dev_table,
+                  get_order(dev_table_size));
+       amd_iommu_dev_table = NULL;
+
+       free_iommu_all();
+}
+
+/* SB IOAPIC is always on this device in AMD systems */
+#define IOAPIC_SB_DEVID                ((0x00 << 8) | PCI_DEVFN(0x14, 0))
+
+static bool __init check_ioapic_information(void)
+{
+       const char *fw_bug = FW_BUG;
+       bool ret, has_sb_ioapic;
+       int idx;
+
+       has_sb_ioapic = false;
+       ret           = false;
+
+       /*
+        * If we have map overrides on the kernel command line, the
+        * messages in this function might not describe firmware bugs
+        * anymore - so be careful
+        */
+       if (cmdline_maps)
+               fw_bug = "";
+
+       for (idx = 0; idx < nr_ioapics; idx++) {
+               int devid, id = mpc_ioapic_id(idx);
+
+               devid = get_ioapic_devid(id);
+               if (devid < 0) {
+                       pr_err("%s: IOAPIC[%d] not in IVRS table\n",
+                               fw_bug, id);
+                       ret = false;
+               } else if (devid == IOAPIC_SB_DEVID) {
+                       has_sb_ioapic = true;
+                       ret           = true;
+               }
+       }
+
+       if (!has_sb_ioapic) {
+               /*
+                * We expect the SB IOAPIC to be listed in the IVRS
+                * table. The system timer is connected to the SB IOAPIC
+                * and if we don't have it in the list the system will
+                * panic at boot time.  This situation usually happens
+                * when the BIOS is buggy and provides us the wrong
+                * device id for the IOAPIC in the system.
+                */
+               pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
+       }
+
+       if (!ret)
+               pr_err("Disabling interrupt remapping\n");
+
+       return ret;
+}
+
+static void __init free_dma_resources(void)
+{
+       free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+                  get_order(MAX_DOMAIN_ID/8));
+       amd_iommu_pd_alloc_bitmap = NULL;
+
+       free_unity_maps();
+}
+
+/*
+ * This is the hardware init function for AMD IOMMU in the system.
+ * This function is called either from amd_iommu_init or from the interrupt
+ * remapping setup code.
+ *
+ * This function basically parses the ACPI table for AMD IOMMU (IVRS)
+ * four times:
+ *
+ *     Pass 1) Discover the most comprehensive IVHD type to use.
+ *
+ *     Pass 2) Find the highest PCI device id the driver has to handle.
+ *             Based on this information the sizes of the data structures
+ *             that need to be allocated are determined.
+ *
+ *     Pass 3) Initialize the data structures just allocated with the
+ *             information in the ACPI table about the available AMD IOMMUs
+ *             in the system. It also maps the PCI devices in the
+ *             system to specific IOMMUs.
+ *
+ *     Pass 4) After the basic data structures are allocated and
+ *             initialized, update them with information about memory
+ *             remapping requirements parsed out of the ACPI table in
+ *             this last pass.
+ *
+ * After everything is set up the IOMMUs are enabled and the necessary
+ * hotplug and suspend notifiers are registered.
+ */
+static int __init early_amd_iommu_init(void)
+{
+       struct acpi_table_header *ivrs_base;
+       acpi_status status;
+       int i, remap_cache_sz, ret = 0;
+       u32 pci_id;
+
+       if (!amd_iommu_detected)
+               return -ENODEV;
+
+       status = acpi_get_table("IVRS", 0, &ivrs_base);
+       if (status == AE_NOT_FOUND)
+               return -ENODEV;
+       else if (ACPI_FAILURE(status)) {
+               const char *err = acpi_format_exception(status);
+               pr_err("IVRS table error: %s\n", err);
+               return -EINVAL;
+       }
+
+       /*
+        * Validate checksum here so we don't need to do it when
+        * we actually parse the table
+        */
+       ret = check_ivrs_checksum(ivrs_base);
+       if (ret)
+               goto out;
+
+       amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
+       DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
+
+       /*
+        * First parse ACPI tables to find the largest Bus/Dev/Func
+        * we need to handle. Based on this information the shared data
+        * structures for the IOMMUs in the system will be allocated.
+        */
+       ret = find_last_devid_acpi(ivrs_base);
+       if (ret)
+               goto out;
+
+       dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
+       alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
+       rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
+
+       /* Device table - directly used by all IOMMUs */
+       ret = -ENOMEM;
+       amd_iommu_dev_table = (void *)__get_free_pages(
+                                     GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
+                                     get_order(dev_table_size));
+       if (amd_iommu_dev_table == NULL)
+               goto out;
+
+       /*
+        * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
+        * IOMMU sees for that device.
+        */
+       amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
+                       get_order(alias_table_size));
+       if (amd_iommu_alias_table == NULL)
+               goto out;
+
+       /* IOMMU rlookup table - find the IOMMU for a specific device */
+       amd_iommu_rlookup_table = (void *)__get_free_pages(
+                       GFP_KERNEL | __GFP_ZERO,
+                       get_order(rlookup_table_size));
+       if (amd_iommu_rlookup_table == NULL)
+               goto out;
+
+       amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
+                                           GFP_KERNEL | __GFP_ZERO,
+                                           get_order(MAX_DOMAIN_ID/8));
+       if (amd_iommu_pd_alloc_bitmap == NULL)
+               goto out;
+
+       /*
+        * Let each alias entry point to itself.
+        */
+       for (i = 0; i <= amd_iommu_last_bdf; ++i)
+               amd_iommu_alias_table[i] = i;
+
+       /*
+        * Never allocate domain 0 because it is used as the non-allocated and
+        * error value placeholder.
+        */
+       __set_bit(0, amd_iommu_pd_alloc_bitmap);
+
+       /*
+        * Now that the data structures are allocated and basically initialized,
+        * start the real ACPI table scan.
+        */
+       ret = init_iommu_all(ivrs_base);
+       if (ret)
+               goto out;
+
+       /* Disable IOMMU if there's Stoney Ridge graphics */
+       for (i = 0; i < 32; i++) {
+               pci_id = read_pci_config(0, i, 0, 0);
+               if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+                       pr_info("Disable IOMMU on Stoney Ridge\n");
+                       amd_iommu_disabled = true;
+                       break;
+               }
+       }
+
+       /* Disable any previously enabled IOMMUs */
+       if (!is_kdump_kernel() || amd_iommu_disabled)
+               disable_iommus();
+
+       if (amd_iommu_irq_remap)
+               amd_iommu_irq_remap = check_ioapic_information();
+
+       if (amd_iommu_irq_remap) {
+               /*
+                * Interrupt remapping enabled, create kmem_cache for the
+                * remapping tables.
+                */
+               ret = -ENOMEM;
+               if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+                       remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
+               else
+                       remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
+               amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+                                                       remap_cache_sz,
+                                                       IRQ_TABLE_ALIGNMENT,
+                                                       0, NULL);
+               if (!amd_iommu_irq_cache)
+                       goto out;
+
+               irq_lookup_table = (void *)__get_free_pages(
+                               GFP_KERNEL | __GFP_ZERO,
+                               get_order(rlookup_table_size));
+               kmemleak_alloc(irq_lookup_table, rlookup_table_size,
+                              1, GFP_KERNEL);
+               if (!irq_lookup_table)
+                       goto out;
+       }
+
+       ret = init_memory_definitions(ivrs_base);
+       if (ret)
+               goto out;
+
+       /* init the device table */
+       init_device_table();
+
+out:
+       /* Don't leak any ACPI memory */
+       acpi_put_table(ivrs_base);
+       ivrs_base = NULL;
+
+       return ret;
+}
+
+static int amd_iommu_enable_interrupts(void)
+{
+       struct amd_iommu *iommu;
+       int ret = 0;
+
+       for_each_iommu(iommu) {
+               ret = iommu_init_msi(iommu);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       return ret;
+}
+
+static bool detect_ivrs(void)
+{
+       struct acpi_table_header *ivrs_base;
+       acpi_status status;
+
+       status = acpi_get_table("IVRS", 0, &ivrs_base);
+       if (status == AE_NOT_FOUND)
+               return false;
+       else if (ACPI_FAILURE(status)) {
+               const char *err = acpi_format_exception(status);
+               pr_err("IVRS table error: %s\n", err);
+               return false;
+       }
+
+       acpi_put_table(ivrs_base);
+
+       /* Make sure ACS will be enabled during PCI probe */
+       pci_request_acs();
+
+       return true;
+}
+
+/****************************************************************************
+ *
+ * AMD IOMMU Initialization State Machine
+ *
+ ****************************************************************************/
+
+static int __init state_next(void)
+{
+       int ret = 0;
+
+       switch (init_state) {
+       case IOMMU_START_STATE:
+               if (!detect_ivrs()) {
+                       init_state      = IOMMU_NOT_FOUND;
+                       ret             = -ENODEV;
+               } else {
+                       init_state      = IOMMU_IVRS_DETECTED;
+               }
+               break;
+       case IOMMU_IVRS_DETECTED:
+               ret = early_amd_iommu_init();
+               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+               if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
+                       pr_info("AMD IOMMU disabled\n");
+                       init_state = IOMMU_CMDLINE_DISABLED;
+                       ret = -EINVAL;
+               }
+               break;
+       case IOMMU_ACPI_FINISHED:
+               early_enable_iommus();
+               x86_platform.iommu_shutdown = disable_iommus;
+               init_state = IOMMU_ENABLED;
+               break;
+       case IOMMU_ENABLED:
+               register_syscore_ops(&amd_iommu_syscore_ops);
+               ret = amd_iommu_init_pci();
+               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+               enable_iommus_v2();
+               break;
+       case IOMMU_PCI_INIT:
+               ret = amd_iommu_enable_interrupts();
+               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
+               break;
+       case IOMMU_INTERRUPTS_EN:
+               ret = amd_iommu_init_dma_ops();
+               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
+               break;
+       case IOMMU_DMA_OPS:
+               init_state = IOMMU_INITIALIZED;
+               break;
+       case IOMMU_INITIALIZED:
+               /* Nothing to do */
+               break;
+       case IOMMU_NOT_FOUND:
+       case IOMMU_INIT_ERROR:
+       case IOMMU_CMDLINE_DISABLED:
+               /* Error states => do nothing */
+               ret = -EINVAL;
+               break;
+       default:
+               /* Unknown state */
+               BUG();
+       }
+
+       if (ret) {
+               free_dma_resources();
+               if (!irq_remapping_enabled) {
+                       disable_iommus();
+                       free_iommu_resources();
+               } else {
+                       struct amd_iommu *iommu;
+
+                       uninit_device_table_dma();
+                       for_each_iommu(iommu)
+                               iommu_flush_all_caches(iommu);
+               }
+       }
+       return ret;
+}
+
+static int __init iommu_go_to_state(enum iommu_init_state state)
+{
+       int ret = -EINVAL;
+
+       while (init_state != state) {
+               if (init_state == IOMMU_NOT_FOUND         ||
+                   init_state == IOMMU_INIT_ERROR        ||
+                   init_state == IOMMU_CMDLINE_DISABLED)
+                       break;
+               ret = state_next();
+       }
+
+       return ret;
+}
+
+#ifdef CONFIG_IRQ_REMAP
+int __init amd_iommu_prepare(void)
+{
+       int ret;
+
+       amd_iommu_irq_remap = true;
+
+       ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
+       if (ret)
+               return ret;
+       return amd_iommu_irq_remap ? 0 : -ENODEV;
+}
+
+int __init amd_iommu_enable(void)
+{
+       int ret;
+
+       ret = iommu_go_to_state(IOMMU_ENABLED);
+       if (ret)
+               return ret;
+
+       irq_remapping_enabled = 1;
+       return amd_iommu_xt_mode;
+}
+
+void amd_iommu_disable(void)
+{
+       amd_iommu_suspend();
+}
+
+int amd_iommu_reenable(int mode)
+{
+       amd_iommu_resume();
+
+       return 0;
+}
+
+int __init amd_iommu_enable_faulting(void)
+{
+       /* We enable MSI later when PCI is initialized */
+       return 0;
+}
+#endif
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ */
+static int __init amd_iommu_init(void)
+{
+       struct amd_iommu *iommu;
+       int ret;
+
+       ret = iommu_go_to_state(IOMMU_INITIALIZED);
+#ifdef CONFIG_GART_IOMMU
+       if (ret && list_empty(&amd_iommu_list)) {
+               /*
+                * We failed to initialize the AMD IOMMU - try fallback
+                * to GART if possible.
+                */
+               gart_iommu_init();
+       }
+#endif
+
+       for_each_iommu(iommu)
+               amd_iommu_debugfs_setup(iommu);
+
+       return ret;
+}
+
+static bool amd_iommu_sme_check(void)
+{
+       if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+               return true;
+
+       /* For Fam17h, a specific level of support is required */
+       if (boot_cpu_data.microcode >= 0x08001205)
+               return true;
+
+       if ((boot_cpu_data.microcode >= 0x08001126) &&
+           (boot_cpu_data.microcode <= 0x080011ff))
+               return true;
+
+       pr_notice("IOMMU not currently supported when SME is active\n");
+
+       return false;
+}
+
+/****************************************************************************
+ *
+ * Early detect code. This code runs at IOMMU detection time in the DMA
+ * layer. It just checks whether there is an IVRS ACPI table to detect
+ * AMD IOMMUs.
+ *
+ ****************************************************************************/
+int __init amd_iommu_detect(void)
+{
+       int ret;
+
+       if (no_iommu || (iommu_detected && !gart_iommu_aperture))
+               return -ENODEV;
+
+       if (!amd_iommu_sme_check())
+               return -ENODEV;
+
+       ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
+       if (ret)
+               return ret;
+
+       amd_iommu_detected = true;
+       iommu_detected = 1;
+       x86_init.iommu.iommu_init = amd_iommu_init;
+
+       return 1;
+}
+
+/****************************************************************************
+ *
+ * Parsing functions for the AMD IOMMU specific kernel command line
+ * options.
+ *
+ ****************************************************************************/
+
+static int __init parse_amd_iommu_dump(char *str)
+{
+       amd_iommu_dump = true;
+
+       return 1;
+}
+
+static int __init parse_amd_iommu_intr(char *str)
+{
+       for (; *str; ++str) {
+               if (strncmp(str, "legacy", 6) == 0) {
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+                       break;
+               }
+               if (strncmp(str, "vapic", 5) == 0) {
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+                       break;
+               }
+       }
+       return 1;
+}
+
+static int __init parse_amd_iommu_options(char *str)
+{
+       for (; *str; ++str) {
+               if (strncmp(str, "fullflush", 9) == 0)
+                       amd_iommu_unmap_flush = true;
+               if (strncmp(str, "off", 3) == 0)
+                       amd_iommu_disabled = true;
+               if (strncmp(str, "force_isolation", 15) == 0)
+                       amd_iommu_force_isolation = true;
+       }
+
+       return 1;
+}
+
+static int __init parse_ivrs_ioapic(char *str)
+{
+       unsigned int bus, dev, fn;
+       int ret, id, i;
+       u16 devid;
+
+       ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+       if (ret != 4) {
+               pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+               return 1;
+       }
+
+       if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+               pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+                       str);
+               return 1;
+       }
+
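+       /* Pack bus/device/function into the 16-bit IVRS device id (bus[15:8], dev[7:3], fn[2:0]) */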
+       devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+       cmdline_maps                    = true;
+       i                               = early_ioapic_map_size++;
+       early_ioapic_map[i].id          = id;
+       early_ioapic_map[i].devid       = devid;
+       early_ioapic_map[i].cmd_line    = true;
+
+       return 1;
+}
+
+static int __init parse_ivrs_hpet(char *str)
+{
+       unsigned int bus, dev, fn;
+       int ret, id, i;
+       u16 devid;
+
+       ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+       if (ret != 4) {
+               pr_err("Invalid command line: ivrs_hpet%s\n", str);
+               return 1;
+       }
+
+       if (early_hpet_map_size == EARLY_MAP_SIZE) {
+               pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
+                       str);
+               return 1;
+       }
+
+       devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+       cmdline_maps                    = true;
+       i                               = early_hpet_map_size++;
+       early_hpet_map[i].id            = id;
+       early_hpet_map[i].devid         = devid;
+       early_hpet_map[i].cmd_line      = true;
+
+       return 1;
+}
+
+static int __init parse_ivrs_acpihid(char *str)
+{
+       u32 bus, dev, fn;
+       char *hid, *uid, *p;
+       char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
+       int ret, i;
+
+       ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
+       if (ret != 4) {
+               pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
+               return 1;
+       }
+
+       p = acpiid;
+       hid = strsep(&p, ":");
+       uid = p;
+
+       if (!hid || !(*hid) || !uid) {
+               pr_err("Invalid command line: hid or uid\n");
+               return 1;
+       }
+
+       i = early_acpihid_map_size++;
+       memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+       memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+       early_acpihid_map[i].devid =
+               ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+       early_acpihid_map[i].cmd_line   = true;
+
+       return 1;
+}
+
+__setup("amd_iommu_dump",      parse_amd_iommu_dump);
+__setup("amd_iommu=",          parse_amd_iommu_options);
+__setup("amd_iommu_intr=",     parse_amd_iommu_intr);
+__setup("ivrs_ioapic",         parse_ivrs_ioapic);
+__setup("ivrs_hpet",           parse_ivrs_hpet);
+__setup("ivrs_acpihid",                parse_ivrs_acpihid);
+
+IOMMU_INIT_FINISH(amd_iommu_detect,
+                 gart_iommu_hole_init,
+                 NULL,
+                 NULL);
+
+bool amd_iommu_v2_supported(void)
+{
+       return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
+
+struct amd_iommu *get_amd_iommu(unsigned int idx)
+{
+       unsigned int i = 0;
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu)
+               if (i++ == idx)
+                       return iommu;
+       return NULL;
+}
+EXPORT_SYMBOL(get_amd_iommu);
+
+/****************************************************************************
+ *
+ * IOMMU EFR Performance Counter support functionality. This code allows
+ * access to the IOMMU PC functionality.
+ *
+ ****************************************************************************/
+
+u8 amd_iommu_pc_get_max_banks(unsigned int idx)
+{
+       struct amd_iommu *iommu = get_amd_iommu(idx);
+
+       if (iommu)
+               return iommu->max_banks;
+
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
+
+bool amd_iommu_pc_supported(void)
+{
+       return amd_iommu_pc_present;
+}
+EXPORT_SYMBOL(amd_iommu_pc_supported);
+
+u8 amd_iommu_pc_get_max_counters(unsigned int idx)
+{
+       struct amd_iommu *iommu = get_amd_iommu(idx);
+
+       if (iommu)
+               return iommu->max_counters;
+
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+                               u8 fxn, u64 *value, bool is_write)
+{
+       u32 offset;
+       u32 max_offset_lim;
+
+       /* Make sure the IOMMU PC resource is available */
+       if (!amd_iommu_pc_present)
+               return -ENODEV;
+
+       /* Check for valid iommu and pc register indexing */
+       if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
+               return -ENODEV;
+
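+       /* Counter register offset: bank in bits 19:12 (OR'd with 0x40), counter in bits 11:8, function in bits 7:0 */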
+       offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
+
+       /* Limit the offset to the hw defined mmio region aperture */
+       max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
+                               (iommu->max_counters << 8) | 0x28);
+       if ((offset < MMIO_CNTR_REG_OFFSET) ||
+           (offset > max_offset_lim))
+               return -EINVAL;
+
+       if (is_write) {
+               u64 val = *value & GENMASK_ULL(47, 0);
+
+               writel((u32)val, iommu->mmio_base + offset);
+               writel((val >> 32), iommu->mmio_base + offset + 4);
+       } else {
+               *value = readl(iommu->mmio_base + offset + 4);
+               *value <<= 32;
+               *value |= readl(iommu->mmio_base + offset);
+               *value &= GENMASK_ULL(47, 0);
+       }
+
+       return 0;
+}
+
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+       if (!iommu)
+               return -EINVAL;
+
+       return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_reg);
+
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+       if (!iommu)
+               return -EINVAL;
+
+       return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
+}
+EXPORT_SYMBOL(amd_iommu_pc_set_reg);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
new file mode 100644 (file)
index 0000000..74cca17
--- /dev/null
@@ -0,0 +1,4041 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ *         Leo Duran <leo.duran@amd.com>
+ */
+
+#define pr_fmt(fmt)     "AMD-Vi: " fmt
+#define dev_fmt(fmt)    pr_fmt(fmt)
+
+#include <linux/ratelimit.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
+#include <linux/pci-ats.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-iommu.h>
+#include <linux/iommu-helper.h>
+#include <linux/delay.h>
+#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/dma-contiguous.h>
+#include <linux/irqdomain.h>
+#include <linux/percpu.h>
+#include <linux/iova.h>
+#include <asm/irq_remapping.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+#include <asm/hw_irq.h>
+#include <asm/msidef.h>
+#include <asm/proto.h>
+#include <asm/iommu.h>
+#include <asm/gart.h>
+#include <asm/dma.h>
+
+#include "amd_iommu.h"
+#include "../irq_remapping.h"
+
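+/* The command type is encoded in bits 31:28 of the second command word */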
+#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
+
+#define LOOP_TIMEOUT   100000
+
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN         (1)
+#define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
+
+/* Reserved IOVA ranges */
+#define MSI_RANGE_START                (0xfee00000)
+#define MSI_RANGE_END          (0xfeefffff)
+#define HT_RANGE_START         (0xfd00000000ULL)
+#define HT_RANGE_END           (0xffffffffffULL)
+
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * 512GB Pages are not supported due to a hardware bug
+ */
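+/* ~0xFFFUL covers every power-of-two size >= 4K; ~(2ULL << 38) masks out the 512GB (2^39) size */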
+#define AMD_IOMMU_PGSIZES      ((~0xFFFUL) & ~(2ULL << 38))
+
+#define DEFAULT_PGTABLE_LEVEL  PAGE_MODE_3_LEVEL
+
+static DEFINE_SPINLOCK(pd_bitmap_lock);
+
+/* List of all available dev_data structures */
+static LLIST_HEAD(dev_data_list);
+
+LIST_HEAD(ioapic_map);
+LIST_HEAD(hpet_map);
+LIST_HEAD(acpihid_map);
+
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+const struct iommu_ops amd_iommu_ops;
+
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
+/*
+ * general struct to manage commands sent to an IOMMU
+ */
+struct iommu_cmd {
+       u32 data[4];
+};
+
+struct kmem_cache *amd_iommu_irq_cache;
+
+static void update_domain(struct protection_domain *domain);
+static void detach_device(struct device *dev);
+static void update_and_flush_device_table(struct protection_domain *domain,
+                                         struct domain_pgtable *pgtable);
+
+/****************************************************************************
+ *
+ * Helper functions
+ *
+ ****************************************************************************/
+
+static inline u16 get_pci_device_id(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       return pci_dev_id(pdev);
+}
+
+static inline int get_acpihid_device_id(struct device *dev,
+                                       struct acpihid_map_entry **entry)
+{
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+       struct acpihid_map_entry *p;
+
+       if (!adev)
+               return -ENODEV;
+
+       list_for_each_entry(p, &acpihid_map, list) {
+               if (acpi_dev_hid_uid_match(adev, p->hid,
+                                          p->uid[0] ? p->uid : NULL)) {
+                       if (entry)
+                               *entry = p;
+                       return p->devid;
+               }
+       }
+       return -EINVAL;
+}
+
+static inline int get_device_id(struct device *dev)
+{
+       int devid;
+
+       if (dev_is_pci(dev))
+               devid = get_pci_device_id(dev);
+       else
+               devid = get_acpihid_device_id(dev, NULL);
+
+       return devid;
+}
+
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+       return container_of(dom, struct protection_domain, domain);
+}
+
+static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
+                                        struct domain_pgtable *pgtable)
+{
+       u64 pt_root = atomic64_read(&domain->pt_root);
+
+       pgtable->root = (u64 *)(pt_root & PAGE_MASK);
+       pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+{
+       u64 pt_root;
+
+       /* lowest 3 bits encode pgtable mode */
+       pt_root = mode & 7;
+       pt_root |= (u64)root;
+
+       return pt_root;
+}
+
+static struct iommu_dev_data *alloc_dev_data(u16 devid)
+{
+       struct iommu_dev_data *dev_data;
+
+       dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+       if (!dev_data)
+               return NULL;
+
+       spin_lock_init(&dev_data->lock);
+       dev_data->devid = devid;
+       ratelimit_default_init(&dev_data->rs);
+
+       llist_add(&dev_data->dev_data_list, &dev_data_list);
+       return dev_data;
+}
+
+static struct iommu_dev_data *search_dev_data(u16 devid)
+{
+       struct iommu_dev_data *dev_data;
+       struct llist_node *node;
+
+       if (llist_empty(&dev_data_list))
+               return NULL;
+
+       node = dev_data_list.first;
+       llist_for_each_entry(dev_data, node, dev_data_list) {
+               if (dev_data->devid == devid)
+                       return dev_data;
+       }
+
+       return NULL;
+}
+
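+/*
+ * Copy the rlookup and device table entries of a device to its DMA alias.
+ */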
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+       u16 devid = pci_dev_id(pdev);
+
+       if (devid == alias)
+               return 0;
+
+       amd_iommu_rlookup_table[alias] =
+               amd_iommu_rlookup_table[devid];
+       memcpy(amd_iommu_dev_table[alias].data,
+              amd_iommu_dev_table[devid].data,
+              sizeof(amd_iommu_dev_table[alias].data));
+
+       return 0;
+}
+
+static void clone_aliases(struct pci_dev *pdev)
+{
+       if (!pdev)
+               return;
+
+       /*
+        * The IVRS alias stored in the alias table may not be
+        * part of the PCI DMA aliases if its bus differs
+        * from that of the original device.
+        */
+       clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+
+       pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}
+
+static struct pci_dev *setup_aliases(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       u16 ivrs_alias;
+
+       /* For ACPI HID devices, there are no aliases */
+       if (!dev_is_pci(dev))
+               return NULL;
+
+       /*
+        * Add the IVRS alias to the pci aliases if it is on the same
+        * bus. The IVRS table may know about a quirk that we don't.
+        */
+       ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+       if (ivrs_alias != pci_dev_id(pdev) &&
+           PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
+               pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
+
+       clone_aliases(pdev);
+
+       return pdev;
+}
+
+static struct iommu_dev_data *find_dev_data(u16 devid)
+{
+       struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+       dev_data = search_dev_data(devid);
+
+       if (dev_data == NULL) {
+               dev_data = alloc_dev_data(devid);
+               if (!dev_data)
+                       return NULL;
+
+               if (translation_pre_enabled(iommu))
+                       dev_data->defer_attach = true;
+       }
+
+       return dev_data;
+}
+
+/*
+ * Find or create an IOMMU group for an acpihid device.
+ */
+static struct iommu_group *acpihid_device_group(struct device *dev)
+{
+       struct acpihid_map_entry *p, *entry = NULL;
+       int devid;
+
+       devid = get_acpihid_device_id(dev, &entry);
+       if (devid < 0)
+               return ERR_PTR(devid);
+
+       list_for_each_entry(p, &acpihid_map, list) {
+               if ((devid == p->devid) && p->group)
+                       entry->group = p->group;
+       }
+
+       if (!entry->group)
+               entry->group = generic_device_group(dev);
+       else
+               iommu_group_ref_get(entry->group);
+
+       return entry->group;
+}
+
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+       static const int caps[] = {
+               PCI_EXT_CAP_ID_PRI,
+               PCI_EXT_CAP_ID_PASID,
+       };
+       int i, pos;
+
+       if (!pci_ats_supported(pdev))
+               return false;
+
+       for (i = 0; i < 2; ++i) {
+               pos = pci_find_ext_capability(pdev, caps[i]);
+               if (pos == 0)
+                       return false;
+       }
+
+       return true;
+}
+
+static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
+{
+       struct iommu_dev_data *dev_data;
+
+       dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       return dev_data->errata & (1 << erratum) ? true : false;
+}
+
+/*
+ * This function checks if the driver got a valid device from the caller to
+ * avoid dereferencing invalid pointers.
+ */
+static bool check_device(struct device *dev)
+{
+       int devid;
+
+       if (!dev)
+               return false;
+
+       devid = get_device_id(dev);
+       if (devid < 0)
+               return false;
+
+       /* Out of our scope? */
+       if (devid > amd_iommu_last_bdf)
+               return false;
+
+       if (amd_iommu_rlookup_table[devid] == NULL)
+               return false;
+
+       return true;
+}
+
+static int iommu_init_device(struct device *dev)
+{
+       struct iommu_dev_data *dev_data;
+       int devid;
+
+       if (dev_iommu_priv_get(dev))
+               return 0;
+
+       devid = get_device_id(dev);
+       if (devid < 0)
+               return devid;
+
+       dev_data = find_dev_data(devid);
+       if (!dev_data)
+               return -ENOMEM;
+
+       dev_data->pdev = setup_aliases(dev);
+
+       /*
+        * By default we use passthrough mode for IOMMUv2-capable devices.
+        * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
+        * invalid address), we ignore the capability for the device so
+        * it'll be forced to go into translation mode.
+        */
+       if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
+           dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
+               struct amd_iommu *iommu;
+
+               iommu = amd_iommu_rlookup_table[dev_data->devid];
+               dev_data->iommu_v2 = iommu->is_iommu_v2;
+       }
+
+       dev_iommu_priv_set(dev, dev_data);
+
+       return 0;
+}
+
+static void iommu_ignore_device(struct device *dev)
+{
+       int devid;
+
+       devid = get_device_id(dev);
+       if (devid < 0)
+               return;
+
+       amd_iommu_rlookup_table[devid] = NULL;
+       memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+
+       setup_aliases(dev);
+}
+
+static void amd_iommu_uninit_device(struct device *dev)
+{
+       struct iommu_dev_data *dev_data;
+
+       dev_data = dev_iommu_priv_get(dev);
+       if (!dev_data)
+               return;
+
+       if (dev_data->domain)
+               detach_device(dev);
+
+       dev_iommu_priv_set(dev, NULL);
+
+       /*
+        * We keep dev_data around for unplugged devices and reuse it when the
+        * device is re-plugged - not doing so would introduce a ton of races.
+        */
+}
+
+/*
+ * Helper function to get the first pte of a large mapping
+ */
+static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
+                        unsigned long *count)
+{
+       unsigned long pte_mask, pg_size, cnt;
+       u64 *fpte;
+
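+       /* Large mappings use a naturally aligned group of 'cnt' 8-byte PTEs; mask down to the first one */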
+       pg_size  = PTE_PAGE_SIZE(*pte);
+       cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
+       pte_mask = ~((cnt << 3) - 1);
+       fpte     = (u64 *)(((unsigned long)pte) & pte_mask);
+
+       if (page_size)
+               *page_size = pg_size;
+
+       if (count)
+               *count = cnt;
+
+       return fpte;
+}
+
+/****************************************************************************
+ *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+static void dump_dte_entry(u16 devid)
+{
+       int i;
+
+       for (i = 0; i < 4; ++i)
+               pr_err("DTE[%d]: %016llx\n", i,
+                       amd_iommu_dev_table[devid].data[i]);
+}
+
+static void dump_command(unsigned long phys_addr)
+{
+       struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
+       int i;
+
+       for (i = 0; i < 4; ++i)
+               pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
+}
+
+static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
+                                       u64 address, int flags)
+{
+       struct iommu_dev_data *dev_data = NULL;
+       struct pci_dev *pdev;
+
+       pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+                                          devid & 0xff);
+       if (pdev)
+               dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data && __ratelimit(&dev_data->rs)) {
+               pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
+                       domain_id, address, flags);
+       } else if (printk_ratelimit()) {
+               pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       domain_id, address, flags);
+       }
+
+       if (pdev)
+               pci_dev_put(pdev);
+}
+
+static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
+{
+       struct device *dev = iommu->iommu.dev;
+       int type, devid, pasid, flags, tag;
+       volatile u32 *event = __evt;
+       int count = 0;
+       u64 address;
+
+retry:
+       type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
+       devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+       pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
+                 (event[1] & EVENT_DOMID_MASK_LO);
+       flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+       address = (u64)(((u64)event[3]) << 32) | event[2];
+
+       if (type == 0) {
+               /* Did we hit the erratum? */
+               if (++count == LOOP_TIMEOUT) {
+                       pr_err("No event written to event log\n");
+                       return;
+               }
+               udelay(1);
+               goto retry;
+       }
+
+       if (type == EVENT_TYPE_IO_FAULT) {
+               amd_iommu_report_page_fault(devid, pasid, address, flags);
+               return;
+       }
+
+       switch (type) {
+       case EVENT_TYPE_ILL_DEV:
+               dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       pasid, address, flags);
+               dump_dte_entry(devid);
+               break;
+       case EVENT_TYPE_DEV_TAB_ERR:
+               dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+                       "address=0x%llx flags=0x%04x]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       address, flags);
+               break;
+       case EVENT_TYPE_PAGE_TAB_ERR:
+               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       pasid, address, flags);
+               break;
+       case EVENT_TYPE_ILL_CMD:
+               dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
+               dump_command(address);
+               break;
+       case EVENT_TYPE_CMD_HARD_ERR:
+               dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
+                       address, flags);
+               break;
+       case EVENT_TYPE_IOTLB_INV_TO:
+               dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       address);
+               break;
+       case EVENT_TYPE_INV_DEV_REQ:
+               dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       pasid, address, flags);
+               break;
+       case EVENT_TYPE_INV_PPR_REQ:
+               pasid = PPR_PASID(*((u64 *)__evt));
+               tag = event[1] & 0x03FF;
+               dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
+                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       pasid, address, flags, tag);
+               break;
+       default:
+               dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
+                       event[0], event[1], event[2], event[3]);
+       }
+
+       memset(__evt, 0, 4 * sizeof(u32));
+}
+
+static void iommu_poll_events(struct amd_iommu *iommu)
+{
+       u32 head, tail;
+
+       head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+       tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+       while (head != tail) {
+               iommu_print_event(iommu, iommu->evt_buf + head);
+               head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+       }
+
+       writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+}
+
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
+{
+       struct amd_iommu_fault fault;
+
+       if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+               pr_err_ratelimited("Unknown PPR request received\n");
+               return;
+       }
+
+       fault.address   = raw[1];
+       fault.pasid     = PPR_PASID(raw[0]);
+       fault.device_id = PPR_DEVID(raw[0]);
+       fault.tag       = PPR_TAG(raw[0]);
+       fault.flags     = PPR_FLAGS(raw[0]);
+
+       atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+       u32 head, tail;
+
+       if (iommu->ppr_log == NULL)
+               return;
+
+       head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+       tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+       while (head != tail) {
+               volatile u64 *raw;
+               u64 entry[2];
+               int i;
+
+               raw = (u64 *)(iommu->ppr_log + head);
+
+               /*
+                * Hardware bug: Interrupt may arrive before the entry is
+                * written to memory. If this happens we need to wait for the
+                * entry to arrive.
+                */
+               for (i = 0; i < LOOP_TIMEOUT; ++i) {
+                       if (PPR_REQ_TYPE(raw[0]) != 0)
+                               break;
+                       udelay(1);
+               }
+
+               /* Avoid memcpy function-call overhead */
+               entry[0] = raw[0];
+               entry[1] = raw[1];
+
+               /*
+                * To detect the hardware bug we need to clear the entry
+                * back to zero.
+                */
+               raw[0] = raw[1] = 0UL;
+
+               /* Update head pointer of hardware ring-buffer */
+               head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+               writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+               /* Handle PPR entry */
+               iommu_handle_ppr_entry(iommu, entry);
+
+               /* Refresh ring-buffer information */
+               head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+               tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+       }
+}
+
+#ifdef CONFIG_IRQ_REMAP
+static int (*iommu_ga_log_notifier)(u32);
+
+int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+       iommu_ga_log_notifier = notifier;
+
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+
+static void iommu_poll_ga_log(struct amd_iommu *iommu)
+{
+       u32 head, tail, cnt = 0;
+
+       if (iommu->ga_log == NULL)
+               return;
+
+       head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+       tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+       while (head != tail) {
+               volatile u64 *raw;
+               u64 log_entry;
+
+               raw = (u64 *)(iommu->ga_log + head);
+               cnt++;
+
+               /* Avoid memcpy function-call overhead */
+               log_entry = *raw;
+
+               /* Update head pointer of hardware ring-buffer */
+               head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
+               writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+
+               /* Handle GA entry */
+               switch (GA_REQ_TYPE(log_entry)) {
+               case GA_GUEST_NR:
+                       if (!iommu_ga_log_notifier)
+                               break;
+
+                       pr_debug("%s: devid=%#x, ga_tag=%#x\n",
+                                __func__, GA_DEVID(log_entry),
+                                GA_TAG(log_entry));
+
+                       if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
+                               pr_err("GA log notifier failed.\n");
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define AMD_IOMMU_INT_MASK     \
+       (MMIO_STATUS_EVT_INT_MASK | \
+        MMIO_STATUS_PPR_INT_MASK | \
+        MMIO_STATUS_GALOG_INT_MASK)
+
+irqreturn_t amd_iommu_int_thread(int irq, void *data)
+{
+       struct amd_iommu *iommu = (struct amd_iommu *) data;
+       u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+       while (status & AMD_IOMMU_INT_MASK) {
+               /* Enable EVT and PPR and GA interrupts again */
+               writel(AMD_IOMMU_INT_MASK,
+                       iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+               if (status & MMIO_STATUS_EVT_INT_MASK) {
+                       pr_devel("Processing IOMMU Event Log\n");
+                       iommu_poll_events(iommu);
+               }
+
+               if (status & MMIO_STATUS_PPR_INT_MASK) {
+                       pr_devel("Processing IOMMU PPR Log\n");
+                       iommu_poll_ppr_log(iommu);
+               }
+
+#ifdef CONFIG_IRQ_REMAP
+               if (status & MMIO_STATUS_GALOG_INT_MASK) {
+                       pr_devel("Processing IOMMU GA Log\n");
+                       iommu_poll_ga_log(iommu);
+               }
+#endif
+
+               /*
+                * Hardware bug: ERBT1312
+                * When re-enabling the interrupt (by writing 1
+                * to clear the bit), the hardware might also try to set
+                * the interrupt bit in the event status register.
+                * In this scenario the bit remains set and disables
+                * subsequent interrupts.
+                *
+                * Workaround: The IOMMU driver should read back the
+                * status register and check if the interrupt bits are cleared.
+                * If not, the driver needs to go through the interrupt handler
+                * again and re-clear the bits.
+                */
+               status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+       }
+       return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+       return IRQ_WAKE_THREAD;
+}
+
+/****************************************************************************
+ *
+ * IOMMU command queuing functions
+ *
+ ****************************************************************************/
+
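+/* Busy-wait until the completion-wait semaphore is written by the IOMMU, or time out */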
+static int wait_on_sem(volatile u64 *sem)
+{
+       int i = 0;
+
+       while (*sem == 0 && i < LOOP_TIMEOUT) {
+               udelay(1);
+               i += 1;
+       }
+
+       if (i == LOOP_TIMEOUT) {
+               pr_alert("Completion-Wait loop timed out\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void copy_cmd_to_buffer(struct amd_iommu *iommu,
+                              struct iommu_cmd *cmd)
+{
+       u8 *target;
+       u32 tail;
+
+       /* Copy command to buffer */
+       tail = iommu->cmd_buf_tail;
+       target = iommu->cmd_buf + tail;
+       memcpy(target, cmd, sizeof(*cmd));
+
+       tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+       iommu->cmd_buf_tail = tail;
+
+       /* Tell the IOMMU about it */
+       writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+}
+
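+/*
+ * Build a COMPLETION_WAIT command which makes the IOMMU store a non-zero
+ * value to the semaphore at 'address' once all prior commands have completed.
+ */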
+static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+{
+       u64 paddr = iommu_virt_to_phys((void *)address);
+
+       WARN_ON(address & 0x7ULL);
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+       cmd->data[1] = upper_32_bits(paddr);
+       cmd->data[2] = 1;
+       CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+}
+
+static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
+{
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[0] = devid;
+       CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
+}
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+                                 size_t size, u16 domid, int pde)
+{
+       u64 pages;
+       bool s;
+
+       pages = iommu_num_pages(address, size, PAGE_SIZE);
+       s     = false;
+
+       if (pages > 1) {
+               /*
+                * If we have to flush more than one page, flush all
+                * TLB entries for this domain
+                */
+               address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+               s = true;
+       }
+
+       address &= PAGE_MASK;
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[1] |= domid;
+       cmd->data[2]  = lower_32_bits(address);
+       cmd->data[3]  = upper_32_bits(address);
+       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+       if (s) /* size bit - we flush more than one 4kb page */
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+}
+
+static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
+                                 u64 address, size_t size)
+{
+       u64 pages;
+       bool s;
+
+       pages = iommu_num_pages(address, size, PAGE_SIZE);
+       s     = false;
+
+       if (pages > 1) {
+               /*
+                * If we have to flush more than one page, flush all
+                * TLB entries for this domain
+                */
+               address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+               s = true;
+       }
+
+       address &= PAGE_MASK;
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[0]  = devid;
+       cmd->data[0] |= (qdep & 0xff) << 24;
+       cmd->data[1]  = devid;
+       cmd->data[2]  = lower_32_bits(address);
+       cmd->data[3]  = upper_32_bits(address);
+       CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+       if (s)
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+                                 u64 address, bool size)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       address &= ~(0xfffULL);
+
+       cmd->data[0]  = pasid;
+       cmd->data[1]  = domid;
+       cmd->data[2]  = lower_32_bits(address);
+       cmd->data[3]  = upper_32_bits(address);
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+       if (size)
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+                                 int qdep, u64 address, bool size)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       address &= ~(0xfffULL);
+
+       cmd->data[0]  = devid;
+       cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+       cmd->data[0] |= (qdep  & 0xff) << 24;
+       cmd->data[1]  = devid;
+       cmd->data[1] |= (pasid & 0xff) << 16;
+       cmd->data[2]  = lower_32_bits(address);
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+       cmd->data[3]  = upper_32_bits(address);
+       if (size)
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+                              int status, int tag, bool gn)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       cmd->data[0]  = devid;
+       if (gn) {
+               cmd->data[1]  = pasid;
+               cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
+       }
+       cmd->data[3]  = tag & 0x1ff;
+       cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+       CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
+static void build_inv_all(struct iommu_cmd *cmd)
+{
+       memset(cmd, 0, sizeof(*cmd));
+       CMD_SET_TYPE(cmd, CMD_INV_ALL);
+}
+
+static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
+{
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[0] = devid;
+       CMD_SET_TYPE(cmd, CMD_INV_IRT);
+}
+
+/*
+ * Writes the command to the IOMMU's command buffer and informs the
+ * hardware about the new command.
+ */
+static int __iommu_queue_command_sync(struct amd_iommu *iommu,
+                                     struct iommu_cmd *cmd,
+                                     bool sync)
+{
+       unsigned int count = 0;
+       u32 left, next_tail;
+
+       next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+again:
+       left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
+
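+       /*
+        * Not enough space? Re-read the head pointer until the IOMMU
+        * catches up or the loop times out.
+        */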
+       if (left <= 0x20) {
+               /* Skip udelay() the first time around */
+               if (count++) {
+                       if (count == LOOP_TIMEOUT) {
+                               pr_err("Command buffer timeout\n");
+                               return -EIO;
+                       }
+
+                       udelay(1);
+               }
+
+               /* Update head and recheck remaining space */
+               iommu->cmd_buf_head = readl(iommu->mmio_base +
+                                           MMIO_CMD_HEAD_OFFSET);
+
+               goto again;
+       }
+
+       copy_cmd_to_buffer(iommu, cmd);
+
+       /* Do we need to make sure all commands are processed? */
+       iommu->need_sync = sync;
+
+       return 0;
+}
+
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+                                   struct iommu_cmd *cmd,
+                                   bool sync)
+{
+       unsigned long flags;
+       int ret;
+
+       raw_spin_lock_irqsave(&iommu->lock, flags);
+       ret = __iommu_queue_command_sync(iommu, cmd, sync);
+       raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
+       return ret;
+}
+
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+       return iommu_queue_command_sync(iommu, cmd, true);
+}
+
+/*
+ * This function queues a completion wait command into the command
+ * buffer of an IOMMU
+ */
+static int iommu_completion_wait(struct amd_iommu *iommu)
+{
+       struct iommu_cmd cmd;
+       unsigned long flags;
+       int ret;
+
+       if (!iommu->need_sync)
+               return 0;
+
+       build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
+
+       raw_spin_lock_irqsave(&iommu->lock, flags);
+
+       iommu->cmd_sem = 0;
+
+       ret = __iommu_queue_command_sync(iommu, &cmd, false);
+       if (ret)
+               goto out_unlock;
+
+       ret = wait_on_sem(&iommu->cmd_sem);
+
+out_unlock:
+       raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
+       return ret;
+}
+
+static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_dte(&cmd, devid);
+
+       return iommu_queue_command(iommu, &cmd);
+}
+
+static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
+{
+       u32 devid;
+
+       for (devid = 0; devid <= 0xffff; ++devid)
+               iommu_flush_dte(iommu, devid);
+
+       iommu_completion_wait(iommu);
+}
+
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
+static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
+{
+       u32 dom_id;
+
+       for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+               struct iommu_cmd cmd;
+               build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                                     dom_id, 1);
+               iommu_queue_command(iommu, &cmd);
+       }
+
+       iommu_completion_wait(iommu);
+}
+
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                             dom_id, 1);
+       iommu_queue_command(iommu, &cmd);
+
+       iommu_completion_wait(iommu);
+}
+
+static void amd_iommu_flush_all(struct amd_iommu *iommu)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_all(&cmd);
+
+       iommu_queue_command(iommu, &cmd);
+       iommu_completion_wait(iommu);
+}
+
+static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_irt(&cmd, devid);
+
+       iommu_queue_command(iommu, &cmd);
+}
+
+static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
+{
+       u32 devid;
+
+       for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+               iommu_flush_irt(iommu, devid);
+
+       iommu_completion_wait(iommu);
+}
+
+void iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+       if (iommu_feature(iommu, FEATURE_IA)) {
+               amd_iommu_flush_all(iommu);
+       } else {
+               amd_iommu_flush_dte_all(iommu);
+               amd_iommu_flush_irt_all(iommu);
+               amd_iommu_flush_tlb_all(iommu);
+       }
+}
+
+/*
+ * Command send function for flushing on-device TLB
+ */
+static int device_flush_iotlb(struct iommu_dev_data *dev_data,
+                             u64 address, size_t size)
+{
+       struct amd_iommu *iommu;
+       struct iommu_cmd cmd;
+       int qdep;
+
+       qdep     = dev_data->ats.qdep;
+       iommu    = amd_iommu_rlookup_table[dev_data->devid];
+
+       build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+
+       return iommu_queue_command(iommu, &cmd);
+}
+
+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+       struct amd_iommu *iommu = data;
+
+       return iommu_flush_dte(iommu, alias);
+}
+
+/*
+ * Command send function for invalidating a device table entry
+ */
+static int device_flush_dte(struct iommu_dev_data *dev_data)
+{
+       struct amd_iommu *iommu;
+       u16 alias;
+       int ret;
+
+       iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+       if (dev_data->pdev)
+               ret = pci_for_each_dma_alias(dev_data->pdev,
+                                            device_flush_dte_alias, iommu);
+       else
+               ret = iommu_flush_dte(iommu, dev_data->devid);
+       if (ret)
+               return ret;
+
+       alias = amd_iommu_alias_table[dev_data->devid];
+       if (alias != dev_data->devid) {
+               ret = iommu_flush_dte(iommu, alias);
+               if (ret)
+                       return ret;
+       }
+
+       if (dev_data->ats.enabled)
+               ret = device_flush_iotlb(dev_data, 0, ~0UL);
+
+       return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It invalidates a single PTE if the range to flush is within a single
+ * page. Otherwise it flushes the whole TLB of the IOMMU.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+                                u64 address, size_t size, int pde)
+{
+       struct iommu_dev_data *dev_data;
+       struct iommu_cmd cmd;
+       int ret = 0, i;
+
+       build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+
+       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+               if (!domain->dev_iommu[i])
+                       continue;
+
+               /*
+                * Devices of this domain are behind this IOMMU
+                * We need a TLB flush
+                */
+               ret |= iommu_queue_command(amd_iommus[i], &cmd);
+       }
+
+       list_for_each_entry(dev_data, &domain->dev_list, list) {
+
+               if (!dev_data->ats.enabled)
+                       continue;
+
+               ret |= device_flush_iotlb(dev_data, address, size);
+       }
+
+       WARN_ON(ret);
+}
+
+static void domain_flush_pages(struct protection_domain *domain,
+                              u64 address, size_t size)
+{
+       __domain_flush_pages(domain, address, size, 0);
+}
+
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+static void domain_flush_tlb_pde(struct protection_domain *domain)
+{
+       __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+}
+
+static void domain_flush_complete(struct protection_domain *domain)
+{
+       int i;
+
+       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+               if (domain && !domain->dev_iommu[i])
+                       continue;
+
+               /*
+                * Devices of this domain are behind this IOMMU
+                * We need to wait for completion of all commands.
+                */
+               iommu_completion_wait(amd_iommus[i]);
+       }
+}
+
+/* Flush the not-present cache if it exists */
+static void domain_flush_np_cache(struct protection_domain *domain,
+               dma_addr_t iova, size_t size)
+{
+       if (unlikely(amd_iommu_np_cache)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&domain->lock, flags);
+               domain_flush_pages(domain, iova, size);
+               domain_flush_complete(domain);
+               spin_unlock_irqrestore(&domain->lock, flags);
+       }
+}
+
+/*
+ * This function flushes the DTEs for all devices in domain
+ */
+static void domain_flush_devices(struct protection_domain *domain)
+{
+       struct iommu_dev_data *dev_data;
+
+       list_for_each_entry(dev_data, &domain->dev_list, list)
+               device_flush_dte(dev_data);
+}
+
+/****************************************************************************
+ *
+ * The functions below are used to create the page table mappings for
+ * unity mapped regions.
+ *
+ ****************************************************************************/
+
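+/*
+ * Page-table pages to be freed are collected on a list chained through
+ * page->freelist and released together by free_page_list().
+ */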
+static void free_page_list(struct page *freelist)
+{
+       while (freelist != NULL) {
+               unsigned long p = (unsigned long)page_address(freelist);
+               freelist = freelist->freelist;
+               free_page(p);
+       }
+}
+
+static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+{
+       struct page *p = virt_to_page((void *)pt);
+
+       p->freelist = freelist;
+
+       return p;
+}
+
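+/*
+ * DEFINE_FREE_PT_FN generates free_pt_l2() through free_pt_l6(). Each
+ * variant hands every present lower-level table to FN and finally puts
+ * its own page on the freelist; leaf and large (level 7) entries do not
+ * reference a lower table and are skipped.
+ */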
+#define DEFINE_FREE_PT_FN(LVL, FN)                                             \
+static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist)  \
+{                                                                              \
+       unsigned long p;                                                        \
+       u64 *pt;                                                                \
+       int i;                                                                  \
+                                                                               \
+       pt = (u64 *)__pt;                                                       \
+                                                                               \
+       for (i = 0; i < 512; ++i) {                                             \
+               /* PTE present? */                                              \
+               if (!IOMMU_PTE_PRESENT(pt[i]))                                  \
+                       continue;                                               \
+                                                                               \
+               /* Large PTE? */                                                \
+               if (PM_PTE_LEVEL(pt[i]) == 0 ||                                 \
+                   PM_PTE_LEVEL(pt[i]) == 7)                                   \
+                       continue;                                               \
+                                                                               \
+               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);                       \
+               freelist = FN(p, freelist);                                     \
+       }                                                                       \
+                                                                               \
+       return free_pt_page((unsigned long)pt, freelist);                       \
+}
+
+DEFINE_FREE_PT_FN(l2, free_pt_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
+static struct page *free_sub_pt(unsigned long root, int mode,
+                               struct page *freelist)
+{
+       switch (mode) {
+       case PAGE_MODE_NONE:
+       case PAGE_MODE_7_LEVEL:
+               break;
+       case PAGE_MODE_1_LEVEL:
+               freelist = free_pt_page(root, freelist);
+               break;
+       case PAGE_MODE_2_LEVEL:
+               freelist = free_pt_l2(root, freelist);
+               break;
+       case PAGE_MODE_3_LEVEL:
+               freelist = free_pt_l3(root, freelist);
+               break;
+       case PAGE_MODE_4_LEVEL:
+               freelist = free_pt_l4(root, freelist);
+               break;
+       case PAGE_MODE_5_LEVEL:
+               freelist = free_pt_l5(root, freelist);
+               break;
+       case PAGE_MODE_6_LEVEL:
+               freelist = free_pt_l6(root, freelist);
+               break;
+       default:
+               BUG();
+       }
+
+       return freelist;
+}
+
+static void free_pagetable(struct domain_pgtable *pgtable)
+{
+       struct page *freelist = NULL;
+       unsigned long root;
+
+       if (pgtable->mode == PAGE_MODE_NONE)
+               return;
+
+       BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+              pgtable->mode > PAGE_MODE_6_LEVEL);
+
+       root = (unsigned long)pgtable->root;
+       freelist = free_sub_pt(root, pgtable->mode, freelist);
+
+       free_page_list(freelist);
+}
+
+/*
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+static bool increase_address_space(struct protection_domain *domain,
+                                  unsigned long address,
+                                  gfp_t gfp)
+{
+       struct domain_pgtable pgtable;
+       unsigned long flags;
+       bool ret = true;
+       u64 *pte, root;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+       if (address <= PM_LEVEL_SIZE(pgtable.mode))
+               goto out;
+
+       ret = false;
+       if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
+               goto out;
+
+       pte = (void *)get_zeroed_page(gfp);
+       if (!pte)
+               goto out;
+
+       *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+
+       pgtable.root  = pte;
+       pgtable.mode += 1;
+       update_and_flush_device_table(domain, &pgtable);
+       domain_flush_complete(domain);
+
+       /*
+        * Device Table needs to be updated and flushed before the new root can
+        * be published.
+        */
+       root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
+       atomic64_set(&domain->pt_root, root);
+
+       ret = true;
+
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+
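+/*
+ * Walk the page-table of @domain down to the level that maps @page_size,
+ * extending the address space and allocating missing levels as needed.
+ * Returns a pointer to the target PTE, or NULL on failure.
+ */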
+static u64 *alloc_pte(struct protection_domain *domain,
+                     unsigned long address,
+                     unsigned long page_size,
+                     u64 **pte_page,
+                     gfp_t gfp,
+                     bool *updated)
+{
+       struct domain_pgtable pgtable;
+       int level, end_lvl;
+       u64 *pte, *page;
+
+       BUG_ON(!is_power_of_2(page_size));
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+       while (address > PM_LEVEL_SIZE(pgtable.mode)) {
+               /*
+                * Return an error if there is no memory to update the
+                * page-table.
+                */
+               if (!increase_address_space(domain, address, gfp))
+                       return NULL;
+
+               /* Read new values to check if update was successful */
+               amd_iommu_domain_get_pgtable(domain, &pgtable);
+       }
+
+       level   = pgtable.mode - 1;
+       pte     = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+       address = PAGE_SIZE_ALIGN(address, page_size);
+       end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+       while (level > end_lvl) {
+               u64 __pte, __npte;
+               int pte_level;
+
+               __pte     = *pte;
+               pte_level = PM_PTE_LEVEL(__pte);
+
+               /*
+                * If we replace a series of large PTEs, we need
+                * to tear down all of them.
+                */
+               if (IOMMU_PTE_PRESENT(__pte) &&
+                   pte_level == PAGE_MODE_7_LEVEL) {
+                       unsigned long count, i;
+                       u64 *lpte;
+
+                       lpte = first_pte_l7(pte, NULL, &count);
+
+                       /*
+                        * Unmap the replicated PTEs that still match the
+                        * original large mapping
+                        */
+                       for (i = 0; i < count; ++i)
+                               cmpxchg64(&lpte[i], __pte, 0ULL);
+
+                       *updated = true;
+                       continue;
+               }
+
+               if (!IOMMU_PTE_PRESENT(__pte) ||
+                   pte_level == PAGE_MODE_NONE) {
+                       page = (u64 *)get_zeroed_page(gfp);
+
+                       if (!page)
+                               return NULL;
+
+                       __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
+
+                       /* pte could have been changed somewhere. */
+                       if (cmpxchg64(pte, __pte, __npte) != __pte)
+                               free_page((unsigned long)page);
+                       else if (IOMMU_PTE_PRESENT(__pte))
+                               *updated = true;
+
+                       continue;
+               }
+
+               /* No level skipping support yet */
+               if (pte_level != level)
+                       return NULL;
+
+               level -= 1;
+
+               pte = IOMMU_PTE_PAGE(__pte);
+
+               if (pte_page && level == end_lvl)
+                       *pte_page = pte;
+
+               pte = &pte[PM_LEVEL_INDEX(level, address)];
+       }
+
+       return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct protection_domain *domain,
+                     unsigned long address,
+                     unsigned long *page_size)
+{
+       struct domain_pgtable pgtable;
+       int level;
+       u64 *pte;
+
+       *page_size = 0;
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+       if (address > PM_LEVEL_SIZE(pgtable.mode))
+               return NULL;
+
+       level      =  pgtable.mode - 1;
+       pte        = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+       *page_size =  PTE_LEVEL_PAGE_SIZE(level);
+
+       while (level > 0) {
+
+               /* Not Present */
+               if (!IOMMU_PTE_PRESENT(*pte))
+                       return NULL;
+
+               /* Large PTE */
+               if (PM_PTE_LEVEL(*pte) == 7 ||
+                   PM_PTE_LEVEL(*pte) == 0)
+                       break;
+
+               /* No level skipping support yet */
+               if (PM_PTE_LEVEL(*pte) != level)
+                       return NULL;
+
+               level -= 1;
+
+               /* Walk to the next level */
+               pte        = IOMMU_PTE_PAGE(*pte);
+               pte        = &pte[PM_LEVEL_INDEX(level, address)];
+               *page_size = PTE_LEVEL_PAGE_SIZE(level);
+       }
+
+       /*
+        * If we have a series of large PTEs, make
+        * sure to return a pointer to the first one.
+        */
+       if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
+               pte = first_pte_l7(pte, page_size, NULL);
+
+       return pte;
+}
+
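+/*
+ * Atomically clear a PTE and, if it referenced a lower-level page-table,
+ * put those pages on the freelist.
+ */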
+static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+{
+       unsigned long pt;
+       int mode;
+
+       while (cmpxchg64(pte, pteval, 0) != pteval) {
+               pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
+               pteval = *pte;
+       }
+
+       if (!IOMMU_PTE_PRESENT(pteval))
+               return freelist;
+
+       pt   = (unsigned long)IOMMU_PTE_PAGE(pteval);
+       mode = IOMMU_PTE_MODE(pteval);
+
+       return free_sub_pt(pt, mode, freelist);
+}
+
+/*
+ * Generic mapping function. It maps a physical address into a DMA
+ * address space. It allocates the page table pages if necessary.
+ * In the future it can be extended to a generic mapping function
+ * supporting all features of AMD IOMMU page tables like level skipping
+ * and full 64 bit address spaces.
+ */
+static int iommu_map_page(struct protection_domain *dom,
+                         unsigned long bus_addr,
+                         unsigned long phys_addr,
+                         unsigned long page_size,
+                         int prot,
+                         gfp_t gfp)
+{
+       struct page *freelist = NULL;
+       bool updated = false;
+       u64 __pte, *pte;
+       int ret, i, count;
+
+       BUG_ON(!IS_ALIGNED(bus_addr, page_size));
+       BUG_ON(!IS_ALIGNED(phys_addr, page_size));
+
+       ret = -EINVAL;
+       if (!(prot & IOMMU_PROT_MASK))
+               goto out;
+
+       count = PAGE_SIZE_PTE_COUNT(page_size);
+       pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
+
+       ret = -ENOMEM;
+       if (!pte)
+               goto out;
+
+       for (i = 0; i < count; ++i)
+               freelist = free_clear_pte(&pte[i], pte[i], freelist);
+
+       if (freelist != NULL)
+               updated = true;
+
+       if (count > 1) {
+               __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
+               __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+       } else
+               __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+
+       if (prot & IOMMU_PROT_IR)
+               __pte |= IOMMU_PTE_IR;
+       if (prot & IOMMU_PROT_IW)
+               __pte |= IOMMU_PTE_IW;
+
+       for (i = 0; i < count; ++i)
+               pte[i] = __pte;
+
+       ret = 0;
+
+out:
+       if (updated) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dom->lock, flags);
+               /*
+                * Flush domain TLB(s) and wait for completion. Any Device-Table
+                * Updates and flushing already happened in
+                * increase_address_space().
+                */
+               domain_flush_tlb_pde(dom);
+               domain_flush_complete(dom);
+               spin_unlock_irqrestore(&dom->lock, flags);
+       }
+
+       /* Everything flushed out, free pages now */
+       free_page_list(freelist);
+
+       return ret;
+}
+
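+/*
+ * Clear the PTEs mapping [bus_addr, bus_addr + page_size) and return the
+ * number of bytes actually unmapped, which depends on the page sizes
+ * found in the page-table.
+ */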
+static unsigned long iommu_unmap_page(struct protection_domain *dom,
+                                     unsigned long bus_addr,
+                                     unsigned long page_size)
+{
+       unsigned long long unmapped;
+       unsigned long unmap_size;
+       u64 *pte;
+
+       BUG_ON(!is_power_of_2(page_size));
+
+       unmapped = 0;
+
+       while (unmapped < page_size) {
+
+               pte = fetch_pte(dom, bus_addr, &unmap_size);
+
+               if (pte) {
+                       int i, count;
+
+                       count = PAGE_SIZE_PTE_COUNT(unmap_size);
+                       for (i = 0; i < count; i++)
+                               pte[i] = 0ULL;
+               }
+
+               bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
+               unmapped += unmap_size;
+       }
+
+       BUG_ON(unmapped && !is_power_of_2(unmapped));
+
+       return unmapped;
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the domain allocation. A domain is
+ * allocated for every IOMMU as the default domain. If device isolation
+ * is enabled, every device gets its own domain. The most important thing
+ * about domains is the page table mapping the DMA address space they
+ * contain.
+ *
+ ****************************************************************************/
+
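+/* Allocate an unused protection domain ID from the global bitmap */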
+static u16 domain_id_alloc(void)
+{
+       int id;
+
+       spin_lock(&pd_bitmap_lock);
+       id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
+       BUG_ON(id == 0);
+       if (id > 0 && id < MAX_DOMAIN_ID)
+               __set_bit(id, amd_iommu_pd_alloc_bitmap);
+       else
+               id = 0;
+       spin_unlock(&pd_bitmap_lock);
+
+       return id;
+}
+
+static void domain_id_free(int id)
+{
+       spin_lock(&pd_bitmap_lock);
+       if (id > 0 && id < MAX_DOMAIN_ID)
+               __clear_bit(id, amd_iommu_pd_alloc_bitmap);
+       spin_unlock(&pd_bitmap_lock);
+}
+
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+       u64 *ptr;
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (!(tbl[i] & GCR3_VALID))
+                       continue;
+
+               ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
+
+               free_page((unsigned long)ptr);
+       }
+}
+
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+       u64 *ptr;
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (!(tbl[i] & GCR3_VALID))
+                       continue;
+
+               ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
+
+               free_gcr3_tbl_level1(ptr);
+       }
+}
+
+static void free_gcr3_table(struct protection_domain *domain)
+{
+       if (domain->glx == 2)
+               free_gcr3_tbl_level2(domain->gcr3_tbl);
+       else if (domain->glx == 1)
+               free_gcr3_tbl_level1(domain->gcr3_tbl);
+       else
+               BUG_ON(domain->glx != 0);
+
+       free_page((unsigned long)domain->gcr3_tbl);
+}
+
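+/*
+ * Write the device-table entry for @devid: point it at the page-table of
+ * @domain and encode the domain ID as well as the ATS, PPR and (for
+ * IOMMUv2 domains) GCR3 settings.
+ */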
+static void set_dte_entry(u16 devid, struct protection_domain *domain,
+                         struct domain_pgtable *pgtable,
+                         bool ats, bool ppr)
+{
+       u64 pte_root = 0;
+       u64 flags = 0;
+       u32 old_domid;
+
+       if (pgtable->mode != PAGE_MODE_NONE)
+               pte_root = iommu_virt_to_phys(pgtable->root);
+
+       pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
+                   << DEV_ENTRY_MODE_SHIFT;
+       pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
+
+       flags = amd_iommu_dev_table[devid].data[1];
+
+       if (ats)
+               flags |= DTE_FLAG_IOTLB;
+
+       if (ppr) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+               if (iommu_feature(iommu, FEATURE_EPHSUP))
+                       pte_root |= 1ULL << DEV_ENTRY_PPR;
+       }
+
+       if (domain->flags & PD_IOMMUV2_MASK) {
+               u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
+               u64 glx  = domain->glx;
+               u64 tmp;
+
+               pte_root |= DTE_FLAG_GV;
+               pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+               /* First mask out possible old values for GCR3 table */
+               tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+               flags    &= ~tmp;
+
+               tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+               flags    &= ~tmp;
+
+               /* Encode GCR3 table into DTE */
+               tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+               pte_root |= tmp;
+
+               tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+               flags    |= tmp;
+
+               tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+               flags    |= tmp;
+       }
+
+       flags &= ~DEV_DOMID_MASK;
+       flags |= domain->id;
+
+       old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
+       amd_iommu_dev_table[devid].data[1]  = flags;
+       amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+       /*
+        * A kdump kernel might be replacing a domain ID that was copied from
+        * the previous kernel--if so, it needs to flush the translation cache
+        * entries for the old domain ID that is being overwritten
+        */
+       if (old_domid) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+               amd_iommu_flush_tlb_domid(iommu, old_domid);
+       }
+}
+
+static void clear_dte_entry(u16 devid)
+{
+       /* remove entry from the device table seen by the hardware */
+       amd_iommu_dev_table[devid].data[0]  = DTE_FLAG_V | DTE_FLAG_TV;
+       amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+
+       amd_iommu_apply_erratum_63(devid);
+}
+
+static void do_attach(struct iommu_dev_data *dev_data,
+                     struct protection_domain *domain)
+{
+       struct domain_pgtable pgtable;
+       struct amd_iommu *iommu;
+       bool ats;
+
+       iommu = amd_iommu_rlookup_table[dev_data->devid];
+       ats   = dev_data->ats.enabled;
+
+       /* Update data structures */
+       dev_data->domain = domain;
+       list_add(&dev_data->list, &domain->dev_list);
+
+       /* Do reference counting */
+       domain->dev_iommu[iommu->index] += 1;
+       domain->dev_cnt                 += 1;
+
+       /* Update device table */
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       set_dte_entry(dev_data->devid, domain, &pgtable,
+                     ats, dev_data->iommu_v2);
+       clone_aliases(dev_data->pdev);
+
+       device_flush_dte(dev_data);
+}
+
+static void do_detach(struct iommu_dev_data *dev_data)
+{
+       struct protection_domain *domain = dev_data->domain;
+       struct amd_iommu *iommu;
+
+       iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+       /* Update data structures */
+       dev_data->domain = NULL;
+       list_del(&dev_data->list);
+       clear_dte_entry(dev_data->devid);
+       clone_aliases(dev_data->pdev);
+
+       /* Flush the DTE entry */
+       device_flush_dte(dev_data);
+
+       /* Flush IOTLB */
+       domain_flush_tlb_pde(domain);
+
+       /* Wait for the flushes to finish */
+       domain_flush_complete(domain);
+
+       /* decrease reference counters - needs to happen after the flushes */
+       domain->dev_iommu[iommu->index] -= 1;
+       domain->dev_cnt                 -= 1;
+}
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+       pci_disable_ats(pdev);
+       pci_disable_pri(pdev);
+       pci_disable_pasid(pdev);
+}
+
+/* FIXME: Change generic reset-function to do the same */
+static int pri_reset_while_enabled(struct pci_dev *pdev)
+{
+       u16 control;
+       int pos;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+       if (!pos)
+               return -EINVAL;
+
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       control |= PCI_PRI_CTRL_RESET;
+       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+       return 0;
+}
+
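+/*
+ * Enable the PCIe features needed for IOMMUv2 operation: PASID first,
+ * then PRI (with an extra reset-while-enabled for devices that need it),
+ * and finally ATS.
+ */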
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+       bool reset_enable;
+       int reqs, ret;
+
+       /* FIXME: Hardcode number of outstanding requests for now */
+       reqs = 32;
+       if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
+               reqs = 1;
+       reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+
+       /* Only allow access to user-accessible pages */
+       ret = pci_enable_pasid(pdev, 0);
+       if (ret)
+               goto out_err;
+
+       /* First reset the PRI state of the device */
+       ret = pci_reset_pri(pdev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PRI */
+       ret = pci_enable_pri(pdev, reqs);
+       if (ret)
+               goto out_err;
+
+       if (reset_enable) {
+               ret = pri_reset_while_enabled(pdev);
+               if (ret)
+                       goto out_err;
+       }
+
+       ret = pci_enable_ats(pdev, PAGE_SHIFT);
+       if (ret)
+               goto out_err;
+
+       return 0;
+
+out_err:
+       pci_disable_pri(pdev);
+       pci_disable_pasid(pdev);
+
+       return ret;
+}
+
+/*
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
+ */
+static int attach_device(struct device *dev,
+                        struct protection_domain *domain)
+{
+       struct iommu_dev_data *dev_data;
+       struct pci_dev *pdev;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       dev_data = dev_iommu_priv_get(dev);
+
+       spin_lock(&dev_data->lock);
+
+       ret = -EBUSY;
+       if (dev_data->domain != NULL)
+               goto out;
+
+       if (!dev_is_pci(dev))
+               goto skip_ats_check;
+
+       pdev = to_pci_dev(dev);
+       if (domain->flags & PD_IOMMUV2_MASK) {
+               struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
+
+               ret = -EINVAL;
+               if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
+                       goto out;
+
+               if (dev_data->iommu_v2) {
+                       if (pdev_iommuv2_enable(pdev) != 0)
+                               goto out;
+
+                       dev_data->ats.enabled = true;
+                       dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+                       dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
+               }
+       } else if (amd_iommu_iotlb_sup &&
+                  pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+               dev_data->ats.enabled = true;
+               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+       }
+
+skip_ats_check:
+       ret = 0;
+
+       do_attach(dev_data, domain);
+
+       /*
+        * We might boot into a crash-kernel here. The crashed kernel
+        * left the caches in the IOMMU dirty. So we have to flush
+        * here to evict all dirty stuff.
+        */
+       domain_flush_tlb_pde(domain);
+
+       domain_flush_complete(domain);
+
+out:
+       spin_unlock(&dev_data->lock);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+
+/*
+ * Removes a device from a protection domain (domain and device locks are taken internally)
+ */
+static void detach_device(struct device *dev)
+{
+       struct protection_domain *domain;
+       struct iommu_dev_data *dev_data;
+       unsigned long flags;
+
+       dev_data = dev_iommu_priv_get(dev);
+       domain   = dev_data->domain;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       spin_lock(&dev_data->lock);
+
+       /*
+        * First check if the device is still attached. It might already
+        * be detached from its domain because the generic
+        * iommu_detach_group code detached it and we try again here in
+        * our alias handling.
+        */
+       if (WARN_ON(!dev_data->domain))
+               goto out;
+
+       do_detach(dev_data);
+
+       if (!dev_is_pci(dev))
+               goto out;
+
+       if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
+               pdev_iommuv2_disable(to_pci_dev(dev));
+       else if (dev_data->ats.enabled)
+               pci_disable_ats(to_pci_dev(dev));
+
+       dev_data->ats.enabled = false;
+
+out:
+       spin_unlock(&dev_data->lock);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static struct iommu_device *amd_iommu_probe_device(struct device *dev)
+{
+       struct iommu_device *iommu_dev;
+       struct amd_iommu *iommu;
+       int ret, devid;
+
+       if (!check_device(dev))
+               return ERR_PTR(-ENODEV);
+
+       devid = get_device_id(dev);
+       if (devid < 0)
+               return ERR_PTR(devid);
+
+       iommu = amd_iommu_rlookup_table[devid];
+
+       if (dev_iommu_priv_get(dev))
+               return &iommu->iommu;
+
+       ret = iommu_init_device(dev);
+       if (ret) {
+               if (ret != -ENOTSUPP)
+                       dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+               iommu_dev = ERR_PTR(ret);
+               iommu_ignore_device(dev);
+       } else {
+               iommu_dev = &iommu->iommu;
+       }
+
+       iommu_completion_wait(iommu);
+
+       return iommu_dev;
+}
+
+static void amd_iommu_probe_finalize(struct device *dev)
+{
+       struct iommu_domain *domain;
+
+       /* Domains are initialized for this device - have a look at what we ended up with */
+       domain = iommu_get_domain_for_dev(dev);
+       if (domain->type == IOMMU_DOMAIN_DMA)
+               iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+}
+
+static void amd_iommu_release_device(struct device *dev)
+{
+       int devid = get_device_id(dev);
+       struct amd_iommu *iommu;
+
+       if (!check_device(dev))
+               return;
+
+       iommu = amd_iommu_rlookup_table[devid];
+
+       amd_iommu_uninit_device(dev);
+       iommu_completion_wait(iommu);
+}
+
+static struct iommu_group *amd_iommu_device_group(struct device *dev)
+{
+       if (dev_is_pci(dev))
+               return pci_device_group(dev);
+
+       return acpihid_device_group(dev);
+}
+
+static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
+               enum iommu_attr attr, void *data)
+{
+       switch (domain->type) {
+       case IOMMU_DOMAIN_UNMANAGED:
+               return -ENODEV;
+       case IOMMU_DOMAIN_DMA:
+               switch (attr) {
+               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+                       *(int *)data = !amd_iommu_unmap_flush;
+                       return 0;
+               default:
+                       return -ENODEV;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*****************************************************************************
+ *
+ * The next functions belong to the dma_ops mapping/unmapping code.
+ *
+ *****************************************************************************/
+
+static void update_device_table(struct protection_domain *domain,
+                               struct domain_pgtable *pgtable)
+{
+       struct iommu_dev_data *dev_data;
+
+       list_for_each_entry(dev_data, &domain->dev_list, list) {
+               set_dte_entry(dev_data->devid, domain, pgtable,
+                             dev_data->ats.enabled, dev_data->iommu_v2);
+               clone_aliases(dev_data->pdev);
+       }
+}
+
+static void update_and_flush_device_table(struct protection_domain *domain,
+                                         struct domain_pgtable *pgtable)
+{
+       update_device_table(domain, pgtable);
+       domain_flush_devices(domain);
+}
+
+static void update_domain(struct protection_domain *domain)
+{
+       struct domain_pgtable pgtable;
+
+       /* Update device table */
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       update_and_flush_device_table(domain, &pgtable);
+
+       /* Flush domain TLB(s) and wait for completion */
+       domain_flush_tlb_pde(domain);
+       domain_flush_complete(domain);
+}
+
+int __init amd_iommu_init_api(void)
+{
+       int ret, err = 0;
+
+       ret = iova_cache_get();
+       if (ret)
+               return ret;
+
+       err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+       if (err)
+               return err;
+#ifdef CONFIG_ARM_AMBA
+       err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
+       if (err)
+               return err;
+#endif
+       err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+int __init amd_iommu_init_dma_ops(void)
+{
+       swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
+
+       if (amd_iommu_unmap_flush)
+               pr_info("IO/TLB flush on unmap enabled\n");
+       else
+               pr_info("Lazy IO/TLB flushing enabled\n");
+
+       return 0;
+}
+
+/*****************************************************************************
+ *
+ * The following functions belong to the exported interface of AMD IOMMU
+ *
+ * This interface allows access to lower level functions of the IOMMU
+ * like protection domain handling and assignment of devices to domains
+ * which is not possible with the dma_ops interface.
+ *
+ *****************************************************************************/
+
+static void cleanup_domain(struct protection_domain *domain)
+{
+       struct iommu_dev_data *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       while (!list_empty(&domain->dev_list)) {
+               entry = list_first_entry(&domain->dev_list,
+                                        struct iommu_dev_data, list);
+               BUG_ON(!entry->domain);
+               do_detach(entry);
+       }
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void protection_domain_free(struct protection_domain *domain)
+{
+       struct domain_pgtable pgtable;
+
+       if (!domain)
+               return;
+
+       if (domain->id)
+               domain_id_free(domain->id);
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       atomic64_set(&domain->pt_root, 0);
+       free_pagetable(&pgtable);
+
+       kfree(domain);
+}
+
+static int protection_domain_init(struct protection_domain *domain, int mode)
+{
+       u64 *pt_root = NULL, root;
+
+       BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
+
+       spin_lock_init(&domain->lock);
+       domain->id = domain_id_alloc();
+       if (!domain->id)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&domain->dev_list);
+
+       if (mode != PAGE_MODE_NONE) {
+               pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!pt_root)
+                       return -ENOMEM;
+       }
+
+       root = amd_iommu_domain_encode_pgtable(pt_root, mode);
+       atomic64_set(&domain->pt_root, root);
+
+       return 0;
+}
+
+static struct protection_domain *protection_domain_alloc(int mode)
+{
+       struct protection_domain *domain;
+
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return NULL;
+
+       if (protection_domain_init(domain, mode))
+               goto out_err;
+
+       return domain;
+
+out_err:
+       kfree(domain);
+
+       return NULL;
+}
+
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+{
+       struct protection_domain *domain;
+       int mode = DEFAULT_PGTABLE_LEVEL;
+
+       if (type == IOMMU_DOMAIN_IDENTITY)
+               mode = PAGE_MODE_NONE;
+
+       domain = protection_domain_alloc(mode);
+       if (!domain)
+               return NULL;
+
+       domain->domain.geometry.aperture_start = 0;
+       domain->domain.geometry.aperture_end   = ~0ULL;
+       domain->domain.geometry.force_aperture = true;
+
+       if (type == IOMMU_DOMAIN_DMA &&
+           iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+               goto free_domain;
+
+       return &domain->domain;
+
+free_domain:
+       protection_domain_free(domain);
+
+       return NULL;
+}
+
+static void amd_iommu_domain_free(struct iommu_domain *dom)
+{
+       struct protection_domain *domain;
+
+       domain = to_pdomain(dom);
+
+       if (domain->dev_cnt > 0)
+               cleanup_domain(domain);
+
+       BUG_ON(domain->dev_cnt != 0);
+
+       if (!dom)
+               return;
+
+       if (dom->type == IOMMU_DOMAIN_DMA)
+               iommu_put_dma_cookie(&domain->domain);
+
+       if (domain->flags & PD_IOMMUV2_MASK)
+               free_gcr3_table(domain);
+
+       protection_domain_free(domain);
+}
+
+static void amd_iommu_detach_device(struct iommu_domain *dom,
+                                   struct device *dev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+       struct amd_iommu *iommu;
+       int devid;
+
+       if (!check_device(dev))
+               return;
+
+       devid = get_device_id(dev);
+       if (devid < 0)
+               return;
+
+       if (dev_data->domain != NULL)
+               detach_device(dev);
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (!iommu)
+               return;
+
+#ifdef CONFIG_IRQ_REMAP
+       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+           (dom->type == IOMMU_DOMAIN_UNMANAGED))
+               dev_data->use_vapic = 0;
+#endif
+
+       iommu_completion_wait(iommu);
+}
+
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+                                  struct device *dev)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
+       int ret;
+
+       if (!check_device(dev))
+               return -EINVAL;
+
+       dev_data = dev_iommu_priv_get(dev);
+       dev_data->defer_attach = false;
+
+       iommu = amd_iommu_rlookup_table[dev_data->devid];
+       if (!iommu)
+               return -EINVAL;
+
+       if (dev_data->domain)
+               detach_device(dev);
+
+       ret = attach_device(dev, domain);
+
+#ifdef CONFIG_IRQ_REMAP
+       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+               if (dom->type == IOMMU_DOMAIN_UNMANAGED)
+                       dev_data->use_vapic = 1;
+               else
+                       dev_data->use_vapic = 0;
+       }
+#endif
+
+       iommu_completion_wait(iommu);
+
+       return ret;
+}
+
+static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
+                        phys_addr_t paddr, size_t page_size, int iommu_prot,
+                        gfp_t gfp)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       struct domain_pgtable pgtable;
+       int prot = 0;
+       int ret;
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode == PAGE_MODE_NONE)
+               return -EINVAL;
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= IOMMU_PROT_IR;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= IOMMU_PROT_IW;
+
+       ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
+
+       domain_flush_np_cache(domain, iova, page_size);
+
+       return ret;
+}
+
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+                             size_t page_size,
+                             struct iommu_iotlb_gather *gather)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       struct domain_pgtable pgtable;
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode == PAGE_MODE_NONE)
+               return 0;
+
+       return iommu_unmap_page(domain, iova, page_size);
+}
+
+static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+                                         dma_addr_t iova)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       unsigned long offset_mask, pte_pgsize;
+       struct domain_pgtable pgtable;
+       u64 *pte, __pte;
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode == PAGE_MODE_NONE)
+               return iova;
+
+       pte = fetch_pte(domain, iova, &pte_pgsize);
+
+       if (!pte || !IOMMU_PTE_PRESENT(*pte))
+               return 0;
+
+       offset_mask = pte_pgsize - 1;
+       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
+
+       return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
+static bool amd_iommu_capable(enum iommu_cap cap)
+{
+       switch (cap) {
+       case IOMMU_CAP_CACHE_COHERENCY:
+               return true;
+       case IOMMU_CAP_INTR_REMAP:
+               return (irq_remapping_enabled == 1);
+       case IOMMU_CAP_NOEXEC:
+               return false;
+       default:
+               break;
+       }
+
+       return false;
+}
+
+static void amd_iommu_get_resv_regions(struct device *dev,
+                                      struct list_head *head)
+{
+       struct iommu_resv_region *region;
+       struct unity_map_entry *entry;
+       int devid;
+
+       devid = get_device_id(dev);
+       if (devid < 0)
+               return;
+
+       list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+               int type, prot = 0;
+               size_t length;
+
+               if (devid < entry->devid_start || devid > entry->devid_end)
+                       continue;
+
+               type   = IOMMU_RESV_DIRECT;
+               length = entry->address_end - entry->address_start;
+               if (entry->prot & IOMMU_PROT_IR)
+                       prot |= IOMMU_READ;
+               if (entry->prot & IOMMU_PROT_IW)
+                       prot |= IOMMU_WRITE;
+               if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+                       /* Exclusion range */
+                       type = IOMMU_RESV_RESERVED;
+
+               region = iommu_alloc_resv_region(entry->address_start,
+                                                length, prot, type);
+               if (!region) {
+                       dev_err(dev, "Out of memory allocating dm-regions\n");
+                       return;
+               }
+               list_add_tail(&region->list, head);
+       }
+
+       region = iommu_alloc_resv_region(MSI_RANGE_START,
+                                        MSI_RANGE_END - MSI_RANGE_START + 1,
+                                        0, IOMMU_RESV_MSI);
+       if (!region)
+               return;
+       list_add_tail(&region->list, head);
+
+       region = iommu_alloc_resv_region(HT_RANGE_START,
+                                        HT_RANGE_END - HT_RANGE_START + 1,
+                                        0, IOMMU_RESV_RESERVED);
+       if (!region)
+               return;
+       list_add_tail(&region->list, head);
+}
+
+bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+                                 struct device *dev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
+       return dev_data->defer_attach;
+}
+EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
+
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+       struct protection_domain *dom = to_pdomain(domain);
+       unsigned long flags;
+
+       spin_lock_irqsave(&dom->lock, flags);
+       domain_flush_tlb_pde(dom);
+       domain_flush_complete(dom);
+       spin_unlock_irqrestore(&dom->lock, flags);
+}
+
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+                                struct iommu_iotlb_gather *gather)
+{
+       amd_iommu_flush_iotlb_all(domain);
+}
+
+static int amd_iommu_def_domain_type(struct device *dev)
+{
+       struct iommu_dev_data *dev_data;
+
+       dev_data = dev_iommu_priv_get(dev);
+       if (!dev_data)
+               return 0;
+
+       if (dev_data->iommu_v2)
+               return IOMMU_DOMAIN_IDENTITY;
+
+       return 0;
+}
+
+const struct iommu_ops amd_iommu_ops = {
+       .capable = amd_iommu_capable,
+       .domain_alloc = amd_iommu_domain_alloc,
+       .domain_free  = amd_iommu_domain_free,
+       .attach_dev = amd_iommu_attach_device,
+       .detach_dev = amd_iommu_detach_device,
+       .map = amd_iommu_map,
+       .unmap = amd_iommu_unmap,
+       .iova_to_phys = amd_iommu_iova_to_phys,
+       .probe_device = amd_iommu_probe_device,
+       .release_device = amd_iommu_release_device,
+       .probe_finalize = amd_iommu_probe_finalize,
+       .device_group = amd_iommu_device_group,
+       .domain_get_attr = amd_iommu_domain_get_attr,
+       .get_resv_regions = amd_iommu_get_resv_regions,
+       .put_resv_regions = generic_iommu_put_resv_regions,
+       .is_attach_deferred = amd_iommu_is_attach_deferred,
+       .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
+       .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+       .iotlb_sync = amd_iommu_iotlb_sync,
+       .def_domain_type = amd_iommu_def_domain_type,
+};
+
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of IOMMU for pass through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+/* IOMMUv2 specific functions */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       struct domain_pgtable pgtable;
+       unsigned long flags;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       /* First save pgtable configuration */
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+       /* Update data structure */
+       atomic64_set(&domain->pt_root, 0);
+
+       /* Make changes visible to IOMMUs */
+       update_domain(domain);
+
+       /* Page-table is not visible to IOMMU anymore, so free it */
+       free_pagetable(&pgtable);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       unsigned long flags;
+       int levels, ret;
+
+       if (pasids <= 0 || pasids > (PASID_MASK + 1))
+               return -EINVAL;
+
+       /* Number of GCR3 table levels required */
+       for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+               levels += 1;
+
+       if (levels > amd_iommu_max_glx_val)
+               return -EINVAL;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       /*
+        * Spare us the sanity checks of whether the devices already in the
+        * domain support IOMMUv2: simply require that the domain has no
+        * devices attached when it is switched into IOMMUv2 mode.
+        */
+       ret = -EBUSY;
+       if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+               goto out;
+
+       ret = -ENOMEM;
+       domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+       if (domain->gcr3_tbl == NULL)
+               goto out;
+
+       domain->glx      = levels;
+       domain->flags   |= PD_IOMMUV2_MASK;
+
+       update_domain(domain);
+
+       ret = 0;
+
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
+
+static int __flush_pasid(struct protection_domain *domain, int pasid,
+                        u64 address, bool size)
+{
+       struct iommu_dev_data *dev_data;
+       struct iommu_cmd cmd;
+       int i, ret;
+
+       if (!(domain->flags & PD_IOMMUV2_MASK))
+               return -EINVAL;
+
+       build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+       /*
+        * IOMMU TLB needs to be flushed before Device TLB to
+        * prevent device TLB refill from IOMMU TLB
+        */
+       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+               if (domain->dev_iommu[i] == 0)
+                       continue;
+
+               ret = iommu_queue_command(amd_iommus[i], &cmd);
+               if (ret != 0)
+                       goto out;
+       }
+
+       /* Wait until IOMMU TLB flushes are complete */
+       domain_flush_complete(domain);
+
+       /* Now flush device TLBs */
+       list_for_each_entry(dev_data, &domain->dev_list, list) {
+               struct amd_iommu *iommu;
+               int qdep;
+
+               /*
+                * There might be non-IOMMUv2 capable devices in an IOMMUv2
+                * domain.
+                */
+               if (!dev_data->ats.enabled)
+                       continue;
+
+               qdep  = dev_data->ats.qdep;
+               iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+               build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+                                     qdep, address, size);
+
+               ret = iommu_queue_command(iommu, &cmd);
+               if (ret != 0)
+                       goto out;
+       }
+
+       /* Wait until all device TLBs are flushed */
+       domain_flush_complete(domain);
+
+       ret = 0;
+
+out:
+
+       return ret;
+}
+
+static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+                                 u64 address)
+{
+       return __flush_pasid(domain, pasid, address, false);
+}
+
+int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+                        u64 address)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __amd_iommu_flush_page(domain, pasid, address);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+{
+       return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                            true);
+}
+
+int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __amd_iommu_flush_tlb(domain, pasid);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
+static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+{
+       int index;
+       u64 *pte;
+
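+       /*
+        * Walk the GCR3 table: each level consumes 9 bits of the PASID and
+        * level 0 holds the final GCR3 entry. Missing intermediate levels
+        * are allocated on demand when @alloc is true.
+        */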
+       while (true) {
+
+               index = (pasid >> (9 * level)) & 0x1ff;
+               pte   = &root[index];
+
+               if (level == 0)
+                       break;
+
+               if (!(*pte & GCR3_VALID)) {
+                       if (!alloc)
+                               return NULL;
+
+                       root = (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (root == NULL)
+                               return NULL;
+
+                       *pte = iommu_virt_to_phys(root) | GCR3_VALID;
+               }
+
+               root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+               level -= 1;
+       }
+
+       return pte;
+}
+
+static int __set_gcr3(struct protection_domain *domain, int pasid,
+                     unsigned long cr3)
+{
+       struct domain_pgtable pgtable;
+       u64 *pte;
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode != PAGE_MODE_NONE)
+               return -EINVAL;
+
+       pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+       if (pte == NULL)
+               return -ENOMEM;
+
+       *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+       return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+static int __clear_gcr3(struct protection_domain *domain, int pasid)
+{
+       struct domain_pgtable pgtable;
+       u64 *pte;
+
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode != PAGE_MODE_NONE)
+               return -EINVAL;
+
+       pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+       if (pte == NULL)
+               return 0;
+
+       *pte = 0;
+
+       return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+                             unsigned long cr3)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __set_gcr3(domain, pasid, cr3);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+{
+       struct protection_domain *domain = to_pdomain(dom);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __clear_gcr3(domain, pasid);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+                          int status, int tag)
+{
+       struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
+       struct iommu_cmd cmd;
+
+       dev_data = dev_iommu_priv_get(&pdev->dev);
+       iommu    = amd_iommu_rlookup_table[dev_data->devid];
+
+       build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+                          tag, dev_data->pri_tlp);
+
+       return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
+{
+       struct protection_domain *pdomain;
+       struct iommu_dev_data *dev_data;
+       struct device *dev = &pdev->dev;
+       struct iommu_domain *io_domain;
+
+       if (!check_device(dev))
+               return NULL;
+
+       dev_data  = dev_iommu_priv_get(&pdev->dev);
+       pdomain   = dev_data->domain;
+       io_domain = iommu_get_domain_for_dev(dev);
+
+       if (pdomain == NULL && dev_data->defer_attach) {
+               dev_data->defer_attach = false;
+               pdomain = to_pdomain(io_domain);
+               attach_device(dev, pdomain);
+       }
+
+       if (pdomain == NULL)
+               return NULL;
+
+       if (io_domain->type != IOMMU_DOMAIN_DMA)
+               return NULL;
+
+       /* Only return IOMMUv2 domains */
+       if (!(pdomain->flags & PD_IOMMUV2_MASK))
+               return NULL;
+
+       return &pdomain->domain;
+}
+EXPORT_SYMBOL(amd_iommu_get_v2_domain);
+
+void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
+{
+       struct iommu_dev_data *dev_data;
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       dev_data = dev_iommu_priv_get(&pdev->dev);
+       dev_data->errata |= (1 << erratum);
+}
+EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
+
+int amd_iommu_device_info(struct pci_dev *pdev,
+                          struct amd_iommu_device_info *info)
+{
+       int max_pasids;
+       int pos;
+
+       if (pdev == NULL || info == NULL)
+               return -EINVAL;
+
+       if (!amd_iommu_v2_supported())
+               return -EINVAL;
+
+       memset(info, 0, sizeof(*info));
+
+       if (pci_ats_supported(pdev))
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+       if (pos)
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+       if (pos) {
+               int features;
+
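+               /*
+                * Each GCR3 table level covers 9 PASID bits; the PASID
+                * itself is at most 20 bits wide.
+                */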
+               max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
+               max_pasids = min(max_pasids, (1 << 20));
+
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+               info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+
+               features = pci_pasid_features(pdev);
+               if (features & PCI_PASID_CAP_EXEC)
+                       info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
+               if (features & PCI_PASID_CAP_PRIV)
+                       info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_device_info);
+
+#ifdef CONFIG_IRQ_REMAP
+
+/*****************************************************************************
+ *
+ * Interrupt Remapping Implementation
+ *
+ *****************************************************************************/
+
+static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);
+
+static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+{
+       u64 dte;
+
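+       /*
+        * Program the physical address, interrupt control and table length
+        * of the remapping table into the device table entry (data[2]) and
+        * enable interrupt remapping for this device.
+        */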
+       dte     = amd_iommu_dev_table[devid].data[2];
+       dte     &= ~DTE_IRQ_PHYS_ADDR_MASK;
+       dte     |= iommu_virt_to_phys(table->table);
+       dte     |= DTE_IRQ_REMAP_INTCTL;
+       dte     |= DTE_IRQ_TABLE_LEN;
+       dte     |= DTE_IRQ_REMAP_ENABLE;
+
+       amd_iommu_dev_table[devid].data[2] = dte;
+}
+
+static struct irq_remap_table *get_irq_table(u16 devid)
+{
+       struct irq_remap_table *table;
+
+       if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
+                     "%s: no iommu for devid %x\n", __func__, devid))
+               return NULL;
+
+       table = irq_lookup_table[devid];
+       if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+               return NULL;
+
+       return table;
+}
+
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+       struct irq_remap_table *table;
+
+       table = kzalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return NULL;
+
+       table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+       if (!table->table) {
+               kfree(table);
+               return NULL;
+       }
+       raw_spin_lock_init(&table->lock);
+
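+       /*
+        * Legacy interrupt remapping uses 32-bit IRTEs, GA mode uses
+        * 128-bit IRTEs.
+        */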
+       if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+               memset(table->table, 0,
+                      MAX_IRQS_PER_TABLE * sizeof(u32));
+       else
+               memset(table->table, 0,
+                      (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+       return table;
+}
+
+static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
+                                 struct irq_remap_table *table)
+{
+       irq_lookup_table[devid] = table;
+       set_dte_irq_entry(devid, table);
+       iommu_flush_dte(iommu, devid);
+}
+
+static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
+                                      void *data)
+{
+       struct irq_remap_table *table = data;
+
+       irq_lookup_table[alias] = table;
+       set_dte_irq_entry(alias, table);
+
+       iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+
+       return 0;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
+{
+       struct irq_remap_table *table = NULL;
+       struct irq_remap_table *new_table = NULL;
+       struct amd_iommu *iommu;
+       unsigned long flags;
+       u16 alias;
+
+       spin_lock_irqsave(&iommu_table_lock, flags);
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (!iommu)
+               goto out_unlock;
+
+       table = irq_lookup_table[devid];
+       if (table)
+               goto out_unlock;
+
+       alias = amd_iommu_alias_table[devid];
+       table = irq_lookup_table[alias];
+       if (table) {
+               set_remap_table_entry(iommu, devid, table);
+               goto out_wait;
+       }
+       spin_unlock_irqrestore(&iommu_table_lock, flags);
+
+       /* Nothing there yet, allocate new irq remapping table */
+       new_table = __alloc_irq_table();
+       if (!new_table)
+               return NULL;
+
+       spin_lock_irqsave(&iommu_table_lock, flags);
+
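+       /*
+        * Re-check under the lock: another CPU may have installed a table
+        * for this devid or its alias while the lock was dropped for the
+        * allocation; if so, the freshly allocated table is freed below.
+        */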
+       table = irq_lookup_table[devid];
+       if (table)
+               goto out_unlock;
+
+       table = irq_lookup_table[alias];
+       if (table) {
+               set_remap_table_entry(iommu, devid, table);
+               goto out_wait;
+       }
+
+       table = new_table;
+       new_table = NULL;
+
+       if (pdev)
+               pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
+                                      table);
+       else
+               set_remap_table_entry(iommu, devid, table);
+
+       if (devid != alias)
+               set_remap_table_entry(iommu, alias, table);
+
+out_wait:
+       iommu_completion_wait(iommu);
+
+out_unlock:
+       spin_unlock_irqrestore(&iommu_table_lock, flags);
+
+       if (new_table) {
+               kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+               kfree(new_table);
+       }
+       return table;
+}
+
+static int alloc_irq_index(u16 devid, int count, bool align,
+                          struct pci_dev *pdev)
+{
+       struct irq_remap_table *table;
+       int index, c, alignment = 1;
+       unsigned long flags;
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+       if (!iommu)
+               return -ENODEV;
+
+       table = alloc_irq_table(devid, pdev);
+       if (!table)
+               return -ENODEV;
+
+       if (align)
+               alignment = roundup_pow_of_two(count);
+
+       raw_spin_lock_irqsave(&table->lock, flags);
+
+       /* Scan table for free entries */
+       for (index = ALIGN(table->min_index, alignment), c = 0;
+            index < MAX_IRQS_PER_TABLE;) {
+               if (!iommu->irte_ops->is_allocated(table, index)) {
+                       c += 1;
+               } else {
+                       c     = 0;
+                       index = ALIGN(index + 1, alignment);
+                       continue;
+               }
+
+               if (c == count) {
+                       for (; c != 0; --c)
+                               iommu->irte_ops->set_allocated(table, index - c + 1);
+
+                       index -= count - 1;
+                       goto out;
+               }
+
+               index++;
+       }
+
+       index = -ENOSPC;
+
+out:
+       raw_spin_unlock_irqrestore(&table->lock, flags);
+
+       return index;
+}
+
+static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+                         struct amd_ir_data *data)
+{
+       struct irq_remap_table *table;
+       struct amd_iommu *iommu;
+       unsigned long flags;
+       struct irte_ga *entry;
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (iommu == NULL)
+               return -EINVAL;
+
+       table = get_irq_table(devid);
+       if (!table)
+               return -ENOMEM;
+
+       raw_spin_lock_irqsave(&table->lock, flags);
+
+       entry = (struct irte_ga *)table->table;
+       entry = &entry[index];
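+       /*
+        * The 128-bit IRTE cannot be updated atomically, so clear the valid
+        * bit first, write both halves and set the valid bit again to avoid
+        * the IOMMU using a half-written entry.
+        */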
+       entry->lo.fields_remap.valid = 0;
+       entry->hi.val = irte->hi.val;
+       entry->lo.val = irte->lo.val;
+       entry->lo.fields_remap.valid = 1;
+       if (data)
+               data->ref = entry;
+
+       raw_spin_unlock_irqrestore(&table->lock, flags);
+
+       iommu_flush_irt(iommu, devid);
+       iommu_completion_wait(iommu);
+
+       return 0;
+}
+
+static int modify_irte(u16 devid, int index, union irte *irte)
+{
+       struct irq_remap_table *table;
+       struct amd_iommu *iommu;
+       unsigned long flags;
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (iommu == NULL)
+               return -EINVAL;
+
+       table = get_irq_table(devid);
+       if (!table)
+               return -ENOMEM;
+
+       raw_spin_lock_irqsave(&table->lock, flags);
+       table->table[index] = irte->val;
+       raw_spin_unlock_irqrestore(&table->lock, flags);
+
+       iommu_flush_irt(iommu, devid);
+       iommu_completion_wait(iommu);
+
+       return 0;
+}
+
+static void free_irte(u16 devid, int index)
+{
+       struct irq_remap_table *table;
+       struct amd_iommu *iommu;
+       unsigned long flags;
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (iommu == NULL)
+               return;
+
+       table = get_irq_table(devid);
+       if (!table)
+               return;
+
+       raw_spin_lock_irqsave(&table->lock, flags);
+       iommu->irte_ops->clear_allocated(table, index);
+       raw_spin_unlock_irqrestore(&table->lock, flags);
+
+       iommu_flush_irt(iommu, devid);
+       iommu_completion_wait(iommu);
+}
+
+static void irte_prepare(void *entry,
+                        u32 delivery_mode, u32 dest_mode,
+                        u8 vector, u32 dest_apicid, int devid)
+{
+       union irte *irte = (union irte *) entry;
+
+       irte->val                = 0;
+       irte->fields.vector      = vector;
+       irte->fields.int_type    = delivery_mode;
+       irte->fields.destination = dest_apicid;
+       irte->fields.dm          = dest_mode;
+       irte->fields.valid       = 1;
+}
+
+static void irte_ga_prepare(void *entry,
+                           u32 delivery_mode, u32 dest_mode,
+                           u8 vector, u32 dest_apicid, int devid)
+{
+       struct irte_ga *irte = (struct irte_ga *) entry;
+
+       irte->lo.val                      = 0;
+       irte->hi.val                      = 0;
+       irte->lo.fields_remap.int_type    = delivery_mode;
+       irte->lo.fields_remap.dm          = dest_mode;
+       irte->hi.fields.vector            = vector;
+       irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
+       irte->hi.fields.destination       = APICID_TO_IRTE_DEST_HI(dest_apicid);
+       irte->lo.fields_remap.valid       = 1;
+}
+
+static void irte_activate(void *entry, u16 devid, u16 index)
+{
+       union irte *irte = (union irte *) entry;
+
+       irte->fields.valid = 1;
+       modify_irte(devid, index, irte);
+}
+
+static void irte_ga_activate(void *entry, u16 devid, u16 index)
+{
+       struct irte_ga *irte = (struct irte_ga *) entry;
+
+       irte->lo.fields_remap.valid = 1;
+       modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_deactivate(void *entry, u16 devid, u16 index)
+{
+       union irte *irte = (union irte *) entry;
+
+       irte->fields.valid = 0;
+       modify_irte(devid, index, irte);
+}
+
+static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+{
+       struct irte_ga *irte = (struct irte_ga *) entry;
+
+       irte->lo.fields_remap.valid = 0;
+       modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_set_affinity(void *entry, u16 devid, u16 index,
+                             u8 vector, u32 dest_apicid)
+{
+       union irte *irte = (union irte *) entry;
+
+       irte->fields.vector = vector;
+       irte->fields.destination = dest_apicid;
+       modify_irte(devid, index, irte);
+}
+
+static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+                                u8 vector, u32 dest_apicid)
+{
+       struct irte_ga *irte = (struct irte_ga *) entry;
+
+       if (!irte->lo.fields_remap.guest_mode) {
+               irte->hi.fields.vector = vector;
+               irte->lo.fields_remap.destination =
+                                       APICID_TO_IRTE_DEST_LO(dest_apicid);
+               irte->hi.fields.destination =
+                                       APICID_TO_IRTE_DEST_HI(dest_apicid);
+               modify_irte_ga(devid, index, irte, NULL);
+       }
+}
+
+#define IRTE_ALLOCATED (~1U)
+static void irte_set_allocated(struct irq_remap_table *table, int index)
+{
+       table->table[index] = IRTE_ALLOCATED;
+}
+
+static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
+{
+       struct irte_ga *ptr = (struct irte_ga *)table->table;
+       struct irte_ga *irte = &ptr[index];
+
+       memset(&irte->lo.val, 0, sizeof(u64));
+       memset(&irte->hi.val, 0, sizeof(u64));
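+       /*
+        * A non-zero vector marks the entry as allocated, see
+        * irte_ga_is_allocated().
+        */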
+       irte->hi.fields.vector = 0xff;
+}
+
+static bool irte_is_allocated(struct irq_remap_table *table, int index)
+{
+       union irte *ptr = (union irte *)table->table;
+       union irte *irte = &ptr[index];
+
+       return irte->val != 0;
+}
+
+static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
+{
+       struct irte_ga *ptr = (struct irte_ga *)table->table;
+       struct irte_ga *irte = &ptr[index];
+
+       return irte->hi.fields.vector != 0;
+}
+
+static void irte_clear_allocated(struct irq_remap_table *table, int index)
+{
+       table->table[index] = 0;
+}
+
+static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
+{
+       struct irte_ga *ptr = (struct irte_ga *)table->table;
+       struct irte_ga *irte = &ptr[index];
+
+       memset(&irte->lo.val, 0, sizeof(u64));
+       memset(&irte->hi.val, 0, sizeof(u64));
+}
+
+static int get_devid(struct irq_alloc_info *info)
+{
+       int devid = -1;
+
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_IOAPIC:
+               devid     = get_ioapic_devid(info->ioapic_id);
+               break;
+       case X86_IRQ_ALLOC_TYPE_HPET:
+               devid     = get_hpet_devid(info->hpet_id);
+               break;
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               devid = get_device_id(&info->msi_dev->dev);
+               break;
+       default:
+               BUG_ON(1);
+               break;
+       }
+
+       return devid;
+}
+
+static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
+{
+       struct amd_iommu *iommu;
+       int devid;
+
+       if (!info)
+               return NULL;
+
+       devid = get_devid(info);
+       if (devid >= 0) {
+               iommu = amd_iommu_rlookup_table[devid];
+               if (iommu)
+                       return iommu->ir_domain;
+       }
+
+       return NULL;
+}
+
+static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
+{
+       struct amd_iommu *iommu;
+       int devid;
+
+       if (!info)
+               return NULL;
+
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               devid = get_device_id(&info->msi_dev->dev);
+               if (devid < 0)
+                       return NULL;
+
+               iommu = amd_iommu_rlookup_table[devid];
+               if (iommu)
+                       return iommu->msi_domain;
+               break;
+       default:
+               break;
+       }
+
+       return NULL;
+}
+
+struct irq_remap_ops amd_iommu_irq_ops = {
+       .prepare                = amd_iommu_prepare,
+       .enable                 = amd_iommu_enable,
+       .disable                = amd_iommu_disable,
+       .reenable               = amd_iommu_reenable,
+       .enable_faulting        = amd_iommu_enable_faulting,
+       .get_ir_irq_domain      = get_ir_irq_domain,
+       .get_irq_domain         = get_irq_domain,
+};
+
+static void irq_remapping_prepare_irte(struct amd_ir_data *data,
+                                      struct irq_cfg *irq_cfg,
+                                      struct irq_alloc_info *info,
+                                      int devid, int index, int sub_handle)
+{
+       struct irq_2_irte *irte_info = &data->irq_2_irte;
+       struct msi_msg *msg = &data->msi_entry;
+       struct IO_APIC_route_entry *entry;
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+       if (!iommu)
+               return;
+
+       data->irq_2_irte.devid = devid;
+       data->irq_2_irte.index = index + sub_handle;
+       iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
+                                apic->irq_dest_mode, irq_cfg->vector,
+                                irq_cfg->dest_apicid, devid);
+
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_IOAPIC:
+               /* Setup IOAPIC entry */
+               entry = info->ioapic_entry;
+               info->ioapic_entry = NULL;
+               memset(entry, 0, sizeof(*entry));
+               entry->vector        = index;
+               entry->mask          = 0;
+               entry->trigger       = info->ioapic_trigger;
+               entry->polarity      = info->ioapic_polarity;
+               /* Mask level triggered irqs. */
+               if (info->ioapic_trigger)
+                       entry->mask = 1;
+               break;
+
+       case X86_IRQ_ALLOC_TYPE_HPET:
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               msg->address_hi = MSI_ADDR_BASE_HI;
+               msg->address_lo = MSI_ADDR_BASE_LO;
+               msg->data = irte_info->index;
+               break;
+
+       default:
+               BUG_ON(1);
+               break;
+       }
+}
+
+struct amd_irte_ops irte_32_ops = {
+       .prepare = irte_prepare,
+       .activate = irte_activate,
+       .deactivate = irte_deactivate,
+       .set_affinity = irte_set_affinity,
+       .set_allocated = irte_set_allocated,
+       .is_allocated = irte_is_allocated,
+       .clear_allocated = irte_clear_allocated,
+};
+
+struct amd_irte_ops irte_128_ops = {
+       .prepare = irte_ga_prepare,
+       .activate = irte_ga_activate,
+       .deactivate = irte_ga_deactivate,
+       .set_affinity = irte_ga_set_affinity,
+       .set_allocated = irte_ga_set_allocated,
+       .is_allocated = irte_ga_is_allocated,
+       .clear_allocated = irte_ga_clear_allocated,
+};
+
+static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
+                              unsigned int nr_irqs, void *arg)
+{
+       struct irq_alloc_info *info = arg;
+       struct irq_data *irq_data;
+       struct amd_ir_data *data = NULL;
+       struct irq_cfg *cfg;
+       int i, ret, devid;
+       int index;
+
+       if (!info)
+               return -EINVAL;
+       if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
+           info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+               return -EINVAL;
+
+       /*
+        * With IRQ remapping enabled, contiguous CPU vectors are not
+        * needed to support multiple MSI interrupts.
+        */
+       if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+       devid = get_devid(info);
+       if (devid < 0)
+               return -EINVAL;
+
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+       if (ret < 0)
+               return ret;
+
+       if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
+               struct irq_remap_table *table;
+               struct amd_iommu *iommu;
+
+               table = alloc_irq_table(devid, NULL);
+               if (table) {
+                       if (!table->min_index) {
+                               /*
+                                * Keep the first 32 indexes free for IOAPIC
+                                * interrupts.
+                                */
+                               table->min_index = 32;
+                               iommu = amd_iommu_rlookup_table[devid];
+                               for (i = 0; i < 32; ++i)
+                                       iommu->irte_ops->set_allocated(table, i);
+                       }
+                       WARN_ON(table->min_index != 32);
+                       index = info->ioapic_pin;
+               } else {
+                       index = -ENOMEM;
+               }
+       } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
+                  info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
+               bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+
+               index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+       } else {
+               index = alloc_irq_index(devid, nr_irqs, false, NULL);
+       }
+
+       if (index < 0) {
+               pr_warn("Failed to allocate IRTE\n");
+               ret = index;
+               goto out_free_parent;
+       }
+
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               cfg = irqd_cfg(irq_data);
+               if (!irq_data || !cfg) {
+                       ret = -EINVAL;
+                       goto out_free_data;
+               }
+
+               ret = -ENOMEM;
+               data = kzalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       goto out_free_data;
+
+               if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+                       data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
+               else
+                       data->entry = kzalloc(sizeof(struct irte_ga),
+                                                    GFP_KERNEL);
+               if (!data->entry) {
+                       kfree(data);
+                       goto out_free_data;
+               }
+
+               irq_data->hwirq = (devid << 16) + i;
+               irq_data->chip_data = data;
+               irq_data->chip = &amd_ir_chip;
+               irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
+               irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
+       }
+
+       return 0;
+
+out_free_data:
+       for (i--; i >= 0; i--) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               if (irq_data)
+                       kfree(irq_data->chip_data);
+       }
+       for (i = 0; i < nr_irqs; i++)
+               free_irte(devid, index + i);
+out_free_parent:
+       irq_domain_free_irqs_common(domain, virq, nr_irqs);
+       return ret;
+}
+
+static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
+                              unsigned int nr_irqs)
+{
+       struct irq_2_irte *irte_info;
+       struct irq_data *irq_data;
+       struct amd_ir_data *data;
+       int i;
+
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq  + i);
+               if (irq_data && irq_data->chip_data) {
+                       data = irq_data->chip_data;
+                       irte_info = &data->irq_2_irte;
+                       free_irte(irte_info->devid, irte_info->index);
+                       kfree(data->entry);
+                       kfree(data);
+               }
+       }
+       irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+                              struct amd_ir_data *ir_data,
+                              struct irq_2_irte *irte_info,
+                              struct irq_cfg *cfg);
+
+static int irq_remapping_activate(struct irq_domain *domain,
+                                 struct irq_data *irq_data, bool reserve)
+{
+       struct amd_ir_data *data = irq_data->chip_data;
+       struct irq_2_irte *irte_info = &data->irq_2_irte;
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+       struct irq_cfg *cfg = irqd_cfg(irq_data);
+
+       if (!iommu)
+               return 0;
+
+       iommu->irte_ops->activate(data->entry, irte_info->devid,
+                                 irte_info->index);
+       amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
+       return 0;
+}
+
+static void irq_remapping_deactivate(struct irq_domain *domain,
+                                    struct irq_data *irq_data)
+{
+       struct amd_ir_data *data = irq_data->chip_data;
+       struct irq_2_irte *irte_info = &data->irq_2_irte;
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+
+       if (iommu)
+               iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+                                           irte_info->index);
+}
+
+static const struct irq_domain_ops amd_ir_domain_ops = {
+       .alloc = irq_remapping_alloc,
+       .free = irq_remapping_free,
+       .activate = irq_remapping_activate,
+       .deactivate = irq_remapping_deactivate,
+};
+
+int amd_iommu_activate_guest_mode(void *data)
+{
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !entry || entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       entry->lo.val = 0;
+       entry->hi.val = 0;
+
+       entry->lo.fields_vapic.guest_mode  = 1;
+       entry->lo.fields_vapic.ga_log_intr = 1;
+       entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
+       entry->hi.fields.vector            = ir_data->ga_vector;
+       entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
+
+       return modify_irte_ga(ir_data->irq_2_irte.devid,
+                             ir_data->irq_2_irte.index, entry, ir_data);
+}
+EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+
+int amd_iommu_deactivate_guest_mode(void *data)
+{
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+       struct irq_cfg *cfg = ir_data->cfg;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !entry || !entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       entry->lo.val = 0;
+       entry->hi.val = 0;
+
+       entry->lo.fields_remap.dm          = apic->irq_dest_mode;
+       entry->lo.fields_remap.int_type    = apic->irq_delivery_mode;
+       entry->hi.fields.vector            = cfg->vector;
+       entry->lo.fields_remap.destination =
+                               APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+       entry->hi.fields.destination =
+                               APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+
+       return modify_irte_ga(ir_data->irq_2_irte.devid,
+                             ir_data->irq_2_irte.index, entry, ir_data);
+}
+EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
+
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+{
+       int ret;
+       struct amd_iommu *iommu;
+       struct amd_iommu_pi_data *pi_data = vcpu_info;
+       struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+       struct amd_ir_data *ir_data = data->chip_data;
+       struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+       struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+
+       /* Note:
+        * This device has never been set up for guest mode,
+        * so we should not modify the IRTE.
+        */
+       if (!dev_data || !dev_data->use_vapic)
+               return 0;
+
+       ir_data->cfg = irqd_cfg(data);
+       pi_data->ir_data = ir_data;
+
+       /* Note:
+        * SVM tries to set up for VAPIC mode, but the IOMMU is in
+        * legacy interrupt remapping mode, so force legacy mode instead.
+        */
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+               pr_debug("%s: Fall back to using intr legacy remap\n",
+                        __func__);
+               pi_data->is_guest_mode = false;
+       }
+
+       iommu = amd_iommu_rlookup_table[irte_info->devid];
+       if (iommu == NULL)
+               return -EINVAL;
+
+       pi_data->prev_ga_tag = ir_data->cached_ga_tag;
+       if (pi_data->is_guest_mode) {
+               ir_data->ga_root_ptr = (pi_data->base >> 12);
+               ir_data->ga_vector = vcpu_pi_info->vector;
+               ir_data->ga_tag = pi_data->ga_tag;
+               ret = amd_iommu_activate_guest_mode(ir_data);
+               if (!ret)
+                       ir_data->cached_ga_tag = pi_data->ga_tag;
+       } else {
+               ret = amd_iommu_deactivate_guest_mode(ir_data);
+
+               /*
+                * This communicates the ga_tag back to the caller
+                * so that it can do all the necessary cleanup.
+                */
+               if (!ret)
+                       ir_data->cached_ga_tag = 0;
+       }
+
+       return ret;
+}
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+                              struct amd_ir_data *ir_data,
+                              struct irq_2_irte *irte_info,
+                              struct irq_cfg *cfg)
+{
+       /*
+        * Atomically update the IRTE with the new destination and vector,
+        * and flush the interrupt entry cache.
+        */
+       iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+                                     irte_info->index, cfg->vector,
+                                     cfg->dest_apicid);
+}
+
+static int amd_ir_set_affinity(struct irq_data *data,
+                              const struct cpumask *mask, bool force)
+{
+       struct amd_ir_data *ir_data = data->chip_data;
+       struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+       struct irq_cfg *cfg = irqd_cfg(data);
+       struct irq_data *parent = data->parent_data;
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+       int ret;
+
+       if (!iommu)
+               return -ENODEV;
+
+       ret = parent->chip->irq_set_affinity(parent, mask, force);
+       if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+               return ret;
+
+       amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
+       /*
+        * After this point, all interrupts will start arriving at the
+        * new destination, so it is time to clean up the previous
+        * vector allocation.
+        */
+       send_cleanup_vector(cfg);
+
+       return IRQ_SET_MASK_OK_DONE;
+}
+
+static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+       struct amd_ir_data *ir_data = irq_data->chip_data;
+
+       *msg = ir_data->msi_entry;
+}
+
+static struct irq_chip amd_ir_chip = {
+       .name                   = "AMD-IR",
+       .irq_ack                = apic_ack_irq,
+       .irq_set_affinity       = amd_ir_set_affinity,
+       .irq_set_vcpu_affinity  = amd_ir_set_vcpu_affinity,
+       .irq_compose_msi_msg    = ir_compose_msi_msg,
+};
+
+int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+       struct fwnode_handle *fn;
+
+       fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
+       if (!fn)
+               return -ENOMEM;
+       iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
+       irq_domain_free_fwnode(fn);
+       if (!iommu->ir_domain)
+               return -ENOMEM;
+
+       iommu->ir_domain->parent = arch_get_ir_parent_domain();
+       iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
+                                                            "AMD-IR-MSI",
+                                                            iommu->index);
+       return 0;
+}
+
+int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+       unsigned long flags;
+       struct amd_iommu *iommu;
+       struct irq_remap_table *table;
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       int devid = ir_data->irq_2_irte.devid;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+       struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (!iommu)
+               return -ENODEV;
+
+       table = get_irq_table(devid);
+       if (!table)
+               return -ENODEV;
+
+       raw_spin_lock_irqsave(&table->lock, flags);
+
+       if (ref->lo.fields_vapic.guest_mode) {
+               if (cpu >= 0) {
+                       ref->lo.fields_vapic.destination =
+                                               APICID_TO_IRTE_DEST_LO(cpu);
+                       ref->hi.fields.destination =
+                                               APICID_TO_IRTE_DEST_HI(cpu);
+               }
+               ref->lo.fields_vapic.is_run = is_run;
+               barrier();
+       }
+
+       raw_spin_unlock_irqrestore(&table->lock, flags);
+
+       iommu_flush_irt(iommu, devid);
+       iommu_completion_wait(iommu);
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
+#endif
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
new file mode 100644 (file)
index 0000000..c8a7b6b
--- /dev/null
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt)     "AMD-Vi: " fmt
+
+#include <linux/mmu_notifier.h>
+#include <linux/amd-iommu.h>
+#include <linux/mm_types.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+
+#include "amd_iommu.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
+
+#define MAX_DEVICES            0x10000
+#define PRI_QUEUE_SIZE         512
+
+struct pri_queue {
+       atomic_t inflight;
+       bool finish;
+       int status;
+};
+
+struct pasid_state {
+       struct list_head list;                  /* For global state-list */
+       atomic_t count;                         /* Reference count */
+       unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
+                                                  calls */
+       struct mm_struct *mm;                   /* mm_struct for the faults */
+       struct mmu_notifier mn;                 /* mmu_notifier handle */
+       struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
+       struct device_state *device_state;      /* Link to our device_state */
+       int pasid;                              /* PASID index */
+       bool invalid;                           /* Used during setup and
+                                                  teardown of the pasid */
+       spinlock_t lock;                        /* Protect pri_queues and
+                                                  mmu_notifier_count */
+       wait_queue_head_t wq;                   /* To wait for count == 0 */
+};
+
+struct device_state {
+       struct list_head list;
+       u16 devid;
+       atomic_t count;
+       struct pci_dev *pdev;
+       struct pasid_state **states;
+       struct iommu_domain *domain;
+       int pasid_levels;
+       int max_pasids;
+       amd_iommu_invalid_ppr_cb inv_ppr_cb;
+       amd_iommu_invalidate_ctx inv_ctx_cb;
+       spinlock_t lock;
+       wait_queue_head_t wq;
+};
+
+struct fault {
+       struct work_struct work;
+       struct device_state *dev_state;
+       struct pasid_state *state;
+       struct mm_struct *mm;
+       u64 address;
+       u16 devid;
+       u16 pasid;
+       u16 tag;
+       u16 finish;
+       u16 flags;
+};
+
+static LIST_HEAD(state_list);
+static spinlock_t state_lock;
+
+static struct workqueue_struct *iommu_wq;
+
+static void free_pasid_states(struct device_state *dev_state);
+
+static u16 device_id(struct pci_dev *pdev)
+{
+       u16 devid;
+
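+       /*
+        * The 16-bit device ID is the PCI BDF: bus number in the high
+        * byte, devfn in the low byte.
+        */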
+       devid = pdev->bus->number;
+       devid = (devid << 8) | pdev->devfn;
+
+       return devid;
+}
+
+static struct device_state *__get_device_state(u16 devid)
+{
+       struct device_state *dev_state;
+
+       list_for_each_entry(dev_state, &state_list, list) {
+               if (dev_state->devid == devid)
+                       return dev_state;
+       }
+
+       return NULL;
+}
+
+static struct device_state *get_device_state(u16 devid)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+
+       spin_lock_irqsave(&state_lock, flags);
+       dev_state = __get_device_state(devid);
+       if (dev_state != NULL)
+               atomic_inc(&dev_state->count);
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return dev_state;
+}
+
+static void free_device_state(struct device_state *dev_state)
+{
+       struct iommu_group *group;
+
+       /*
+        * First detach the device from the domain - no more PRI requests
+        * will arrive from that device once it is unbound from the
+        * IOMMUv2 domain.
+        */
+       group = iommu_group_get(&dev_state->pdev->dev);
+       if (WARN_ON(!group))
+               return;
+
+       iommu_detach_group(dev_state->domain, group);
+
+       iommu_group_put(group);
+
+       /* Everything is down now, free the IOMMUv2 domain */
+       iommu_domain_free(dev_state->domain);
+
+       /* Finally get rid of the device-state */
+       kfree(dev_state);
+}
+
+static void put_device_state(struct device_state *dev_state)
+{
+       if (atomic_dec_and_test(&dev_state->count))
+               wake_up(&dev_state->wq);
+}
+
+/* Must be called under dev_state->lock */
+static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
+                                                 int pasid, bool alloc)
+{
+       struct pasid_state **root, **ptr;
+       int level, index;
+
+       level = dev_state->pasid_levels;
+       root  = dev_state->states;
+
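+       /*
+        * Walk the multi-level pasid_state table; each level indexes 9 bits
+        * of the PASID. Intermediate levels are allocated on demand when
+        * @alloc is true.
+        */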
+       while (true) {
+
+               index = (pasid >> (9 * level)) & 0x1ff;
+               ptr   = &root[index];
+
+               if (level == 0)
+                       break;
+
+               if (*ptr == NULL) {
+                       if (!alloc)
+                               return NULL;
+
+                       *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (*ptr == NULL)
+                               return NULL;
+               }
+
+               root   = (struct pasid_state **)*ptr;
+               level -= 1;
+       }
+
+       return ptr;
+}
+
+static int set_pasid_state(struct device_state *dev_state,
+                          struct pasid_state *pasid_state,
+                          int pasid)
+{
+       struct pasid_state **ptr;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dev_state->lock, flags);
+       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+       ret = -ENOMEM;
+       if (ptr == NULL)
+               goto out_unlock;
+
+       ret = -ENOMEM;
+       if (*ptr != NULL)
+               goto out_unlock;
+
+       *ptr = pasid_state;
+
+       ret = 0;
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_state->lock, flags);
+
+       return ret;
+}
+
+static void clear_pasid_state(struct device_state *dev_state, int pasid)
+{
+       struct pasid_state **ptr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_state->lock, flags);
+       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+       if (ptr == NULL)
+               goto out_unlock;
+
+       *ptr = NULL;
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_state->lock, flags);
+}
+
+static struct pasid_state *get_pasid_state(struct device_state *dev_state,
+                                          int pasid)
+{
+       struct pasid_state **ptr, *ret = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_state->lock, flags);
+       ptr = __get_pasid_state_ptr(dev_state, pasid, false);
+
+       if (ptr == NULL)
+               goto out_unlock;
+
+       ret = *ptr;
+       if (ret)
+               atomic_inc(&ret->count);
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_state->lock, flags);
+
+       return ret;
+}
+
+static void free_pasid_state(struct pasid_state *pasid_state)
+{
+       kfree(pasid_state);
+}
+
+static void put_pasid_state(struct pasid_state *pasid_state)
+{
+       if (atomic_dec_and_test(&pasid_state->count))
+               wake_up(&pasid_state->wq);
+}
+
+static void put_pasid_state_wait(struct pasid_state *pasid_state)
+{
+       atomic_dec(&pasid_state->count);
+       wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
+       free_pasid_state(pasid_state);
+}
+
+static void unbind_pasid(struct pasid_state *pasid_state)
+{
+       struct iommu_domain *domain;
+
+       domain = pasid_state->device_state->domain;
+
+       /*
+        * Mark pasid_state as invalid; no more faults will be added to the
+        * work queue after this is visible everywhere.
+        */
+       pasid_state->invalid = true;
+
+       /* Make sure this is visible */
+       smp_wmb();
+
+       /* After this the device/pasid can't access the mm anymore */
+       amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
+
+       /* Make sure no more pending faults are in the queue */
+       flush_workqueue(iommu_wq);
+}
+
+static void free_pasid_states_level1(struct pasid_state **tbl)
+{
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (tbl[i] == NULL)
+                       continue;
+
+               free_page((unsigned long)tbl[i]);
+       }
+}
+
+static void free_pasid_states_level2(struct pasid_state **tbl)
+{
+       struct pasid_state **ptr;
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (tbl[i] == NULL)
+                       continue;
+
+               ptr = (struct pasid_state **)tbl[i];
+               free_pasid_states_level1(ptr);
+       }
+}
+
+static void free_pasid_states(struct device_state *dev_state)
+{
+       struct pasid_state *pasid_state;
+       int i;
+
+       for (i = 0; i < dev_state->max_pasids; ++i) {
+               pasid_state = get_pasid_state(dev_state, i);
+               if (pasid_state == NULL)
+                       continue;
+
+               put_pasid_state(pasid_state);
+
+               /*
+                * This will call the mn_release function and
+                * unbind the PASID
+                */
+               mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+               put_pasid_state_wait(pasid_state); /* Reference taken in
+                                                     amd_iommu_bind_pasid */
+
+               /* Drop reference taken in amd_iommu_bind_pasid */
+               put_device_state(dev_state);
+       }
+
+       if (dev_state->pasid_levels == 2)
+               free_pasid_states_level2(dev_state->states);
+       else if (dev_state->pasid_levels == 1)
+               free_pasid_states_level1(dev_state->states);
+       else
+               BUG_ON(dev_state->pasid_levels != 0);
+
+       free_page((unsigned long)dev_state->states);
+}
+
+static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
+{
+       return container_of(mn, struct pasid_state, mn);
+}
+
+static void mn_invalidate_range(struct mmu_notifier *mn,
+                               struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+
+       pasid_state = mn_to_state(mn);
+       dev_state   = pasid_state->device_state;
+
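+       /*
+        * If start and end - 1 fall into the same page, flush only that
+        * page, otherwise flush the whole TLB for this PASID.
+        */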
+       if ((start ^ (end - 1)) < PAGE_SIZE)
+               amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
+                                    start);
+       else
+               amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
+}
+
+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+       bool run_inv_ctx_cb;
+
+       might_sleep();
+
+       pasid_state    = mn_to_state(mn);
+       dev_state      = pasid_state->device_state;
+       run_inv_ctx_cb = !pasid_state->invalid;
+
+       if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
+               dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+       unbind_pasid(pasid_state);
+}
+
+static const struct mmu_notifier_ops iommu_mn = {
+       .release                = mn_release,
+       .invalidate_range       = mn_invalidate_range,
+};
+
+static void set_pri_tag_status(struct pasid_state *pasid_state,
+                              u16 tag, int status)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       pasid_state->pri[tag].status = status;
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void finish_pri_tag(struct device_state *dev_state,
+                          struct pasid_state *pasid_state,
+                          u16 tag)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
+           pasid_state->pri[tag].finish) {
+               amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
+                                      pasid_state->pri[tag].status, tag);
+               pasid_state->pri[tag].finish = false;
+               pasid_state->pri[tag].status = PPR_SUCCESS;
+       }
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void handle_fault_error(struct fault *fault)
+{
+       int status;
+
+       if (!fault->dev_state->inv_ppr_cb) {
+               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+               return;
+       }
+
+       status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+                                             fault->pasid,
+                                             fault->address,
+                                             fault->flags);
+       switch (status) {
+       case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+               set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+               break;
+       case AMD_IOMMU_INV_PRI_RSP_INVALID:
+               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+               break;
+       case AMD_IOMMU_INV_PRI_RSP_FAIL:
+               set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+       unsigned long requested = 0;
+
+       if (fault->flags & PPR_FAULT_EXEC)
+               requested |= VM_EXEC;
+
+       if (fault->flags & PPR_FAULT_READ)
+               requested |= VM_READ;
+
+       if (fault->flags & PPR_FAULT_WRITE)
+               requested |= VM_WRITE;
+
+       return (requested & ~vma->vm_flags) != 0;
+}
+
+static void do_fault(struct work_struct *work)
+{
+       struct fault *fault = container_of(work, struct fault, work);
+       struct vm_area_struct *vma;
+       vm_fault_t ret = VM_FAULT_ERROR;
+       unsigned int flags = 0;
+       struct mm_struct *mm;
+       u64 address;
+
+       mm = fault->state->mm;
+       address = fault->address;
+
+       if (fault->flags & PPR_FAULT_USER)
+               flags |= FAULT_FLAG_USER;
+       if (fault->flags & PPR_FAULT_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+       flags |= FAULT_FLAG_REMOTE;
+
+       down_read(&mm->mmap_sem);
+       vma = find_extend_vma(mm, address);
+       if (!vma || address < vma->vm_start)
+               /* failed to get a vma in the right range */
+               goto out;
+
+       /* Check if we have the right permissions on the vma */
+       if (access_error(vma, fault))
+               goto out;
+
+       ret = handle_mm_fault(vma, address, flags);
+out:
+       up_read(&mm->mmap_sem);
+
+       if (ret & VM_FAULT_ERROR)
+               /* failed to service fault */
+               handle_fault_error(fault);
+
+       finish_pri_tag(fault->dev_state, fault->state, fault->tag);
+
+       put_pasid_state(fault->state);
+
+       kfree(fault);
+}
+
+static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
+{
+       struct amd_iommu_fault *iommu_fault;
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+       struct pci_dev *pdev = NULL;
+       unsigned long flags;
+       struct fault *fault;
+       bool finish;
+       u16 tag, devid;
+       int ret;
+
+       iommu_fault = data;
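+       /*
+        * The low 9 bits carry the PPR tag; bit 9 indicates that a PPR
+        * completion has to be sent once all faults for this tag have
+        * finished.
+        */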
+       tag         = iommu_fault->tag & 0x1ff;
+       finish      = (iommu_fault->tag >> 9) & 1;
+
+       devid = iommu_fault->device_id;
+       pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+                                          devid & 0xff);
+       if (!pdev)
+               return -ENODEV;
+
+       ret = NOTIFY_DONE;
+
+       /* In a kdump kernel the PCI device is not initialized yet -> send INVALID */
+       if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
+               amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
+                                      PPR_INVALID, tag);
+               goto out;
+       }
+
+       dev_state = get_device_state(iommu_fault->device_id);
+       if (dev_state == NULL)
+               goto out;
+
+       pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
+       if (pasid_state == NULL || pasid_state->invalid) {
+               /* We know the device but not the PASID -> send INVALID */
+               amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
+                                      PPR_INVALID, tag);
+               goto out_drop_state;
+       }
+
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       atomic_inc(&pasid_state->pri[tag].inflight);
+       if (finish)
+               pasid_state->pri[tag].finish = true;
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
+
+       fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
+       if (fault == NULL) {
+               /* We are OOM - send success and let the device re-fault */
+               finish_pri_tag(dev_state, pasid_state, tag);
+               goto out_drop_state;
+       }
+
+       fault->dev_state = dev_state;
+       fault->address   = iommu_fault->address;
+       fault->state     = pasid_state;
+       fault->tag       = tag;
+       fault->finish    = finish;
+       fault->pasid     = iommu_fault->pasid;
+       fault->flags     = iommu_fault->flags;
+       INIT_WORK(&fault->work, do_fault);
+
+       queue_work(iommu_wq, &fault->work);
+
+       ret = NOTIFY_OK;
+
+out_drop_state:
+
+       if (ret != NOTIFY_OK && pasid_state)
+               put_pasid_state(pasid_state);
+
+       put_device_state(dev_state);
+
+out:
+       return ret;
+}
+
+static struct notifier_block ppr_nb = {
+       .notifier_call = ppr_notifier,
+};
+
+int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+                        struct task_struct *task)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+       struct mm_struct *mm;
+       u16 devid;
+       int ret;
+
+       might_sleep();
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       devid     = device_id(pdev);
+       dev_state = get_device_state(devid);
+
+       if (dev_state == NULL)
+               return -EINVAL;
+
+       ret = -EINVAL;
+       if (pasid < 0 || pasid >= dev_state->max_pasids)
+               goto out;
+
+       ret = -ENOMEM;
+       pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
+       if (pasid_state == NULL)
+               goto out;
+
+       atomic_set(&pasid_state->count, 1);
+       init_waitqueue_head(&pasid_state->wq);
+       spin_lock_init(&pasid_state->lock);
+
+       mm                        = get_task_mm(task);
+       pasid_state->mm           = mm;
+       pasid_state->device_state = dev_state;
+       pasid_state->pasid        = pasid;
+       pasid_state->invalid      = true; /* Marked valid only once setup of
+                                            the pasid is complete */
+       pasid_state->mn.ops       = &iommu_mn;
+
+       if (pasid_state->mm == NULL)
+               goto out_free;
+
+       mmu_notifier_register(&pasid_state->mn, mm);
+
+       ret = set_pasid_state(dev_state, pasid_state, pasid);
+       if (ret)
+               goto out_unregister;
+
+       ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
+                                       __pa(pasid_state->mm->pgd));
+       if (ret)
+               goto out_clear_state;
+
+       /* Now we are ready to handle faults */
+       pasid_state->invalid = false;
+
+       /*
+        * Drop the reference to the mm_struct here. We rely on the
+        * mmu_notifier release call-back to inform us when the mm
+        * is going away.
+        */
+       mmput(mm);
+
+       return 0;
+
+out_clear_state:
+       clear_pasid_state(dev_state, pasid);
+
+out_unregister:
+       mmu_notifier_unregister(&pasid_state->mn, mm);
+       mmput(mm);
+
+out_free:
+       free_pasid_state(pasid_state);
+
+out:
+       put_device_state(dev_state);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_bind_pasid);
+
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+       u16 devid;
+
+       might_sleep();
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       devid = device_id(pdev);
+       dev_state = get_device_state(devid);
+       if (dev_state == NULL)
+               return;
+
+       if (pasid < 0 || pasid >= dev_state->max_pasids)
+               goto out;
+
+       pasid_state = get_pasid_state(dev_state, pasid);
+       if (pasid_state == NULL)
+               goto out;
+       /*
+        * Drop reference taken here. We are safe because we still hold
+        * the reference taken in the amd_iommu_bind_pasid function.
+        */
+       put_pasid_state(pasid_state);
+
+       /* Clear the pasid state so that the pasid can be re-used */
+       clear_pasid_state(dev_state, pasid_state->pasid);
+
+       /*
+        * Call mmu_notifier_unregister to drop our reference
+        * to pasid_state->mm
+        */
+       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+       put_pasid_state_wait(pasid_state); /* Reference taken in
+                                             amd_iommu_bind_pasid */
+out:
+       /* Drop reference taken in this function */
+       put_device_state(dev_state);
+
+       /* Drop reference taken in amd_iommu_bind_pasid */
+       put_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_unbind_pasid);
+
+int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
+{
+       struct device_state *dev_state;
+       struct iommu_group *group;
+       unsigned long flags;
+       int ret, tmp;
+       u16 devid;
+
+       might_sleep();
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       if (pasids <= 0 || pasids > (PASID_MASK + 1))
+               return -EINVAL;
+
+       devid = device_id(pdev);
+
+       dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
+       if (dev_state == NULL)
+               return -ENOMEM;
+
+       spin_lock_init(&dev_state->lock);
+       init_waitqueue_head(&dev_state->wq);
+       dev_state->pdev  = pdev;
+       dev_state->devid = devid;
+
+       tmp = pasids;
+       for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
+               dev_state->pasid_levels += 1;
+
+       atomic_set(&dev_state->count, 1);
+       dev_state->max_pasids = pasids;
+
+       ret = -ENOMEM;
+       dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
+       if (dev_state->states == NULL)
+               goto out_free_dev_state;
+
+       dev_state->domain = iommu_domain_alloc(&pci_bus_type);
+       if (dev_state->domain == NULL)
+               goto out_free_states;
+
+       amd_iommu_domain_direct_map(dev_state->domain);
+
+       ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
+       if (ret)
+               goto out_free_domain;
+
+       group = iommu_group_get(&pdev->dev);
+       if (!group) {
+               ret = -EINVAL;
+               goto out_free_domain;
+       }
+
+       ret = iommu_attach_group(dev_state->domain, group);
+       if (ret != 0)
+               goto out_drop_group;
+
+       iommu_group_put(group);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       if (__get_device_state(devid) != NULL) {
+               spin_unlock_irqrestore(&state_lock, flags);
+               ret = -EBUSY;
+               goto out_free_domain;
+       }
+
+       list_add_tail(&dev_state->list, &state_list);
+
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return 0;
+
+out_drop_group:
+       iommu_group_put(group);
+
+out_free_domain:
+       iommu_domain_free(dev_state->domain);
+
+out_free_states:
+       free_page((unsigned long)dev_state->states);
+
+out_free_dev_state:
+       kfree(dev_state);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_init_device);
+
+void amd_iommu_free_device(struct pci_dev *pdev)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       u16 devid;
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       devid = device_id(pdev);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       dev_state = __get_device_state(devid);
+       if (dev_state == NULL) {
+               spin_unlock_irqrestore(&state_lock, flags);
+               return;
+       }
+
+       list_del(&dev_state->list);
+
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       /* Get rid of any remaining pasid states */
+       free_pasid_states(dev_state);
+
+       put_device_state(dev_state);
+       /*
+        * Wait until the last reference is dropped before freeing
+        * the device state.
+        */
+       wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+       free_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_free_device);
+
+int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+                                amd_iommu_invalid_ppr_cb cb)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       u16 devid;
+       int ret;
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       devid = device_id(pdev);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       ret = -EINVAL;
+       dev_state = __get_device_state(devid);
+       if (dev_state == NULL)
+               goto out_unlock;
+
+       dev_state->inv_ppr_cb = cb;
+
+       ret = 0;
+
+out_unlock:
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
+
+int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+                                   amd_iommu_invalidate_ctx cb)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       u16 devid;
+       int ret;
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       devid = device_id(pdev);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       ret = -EINVAL;
+       dev_state = __get_device_state(devid);
+       if (dev_state == NULL)
+               goto out_unlock;
+
+       dev_state->inv_ctx_cb = cb;
+
+       ret = 0;
+
+out_unlock:
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
+
+static int __init amd_iommu_v2_init(void)
+{
+       int ret;
+
+       pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
+
+       if (!amd_iommu_v2_supported()) {
+               pr_info("AMD IOMMUv2 functionality not available on this system\n");
+               /*
+                * Load anyway to provide the symbols to other modules
+                * which may use AMD IOMMUv2 optionally.
+                */
+               return 0;
+       }
+
+       spin_lock_init(&state_lock);
+
+       ret = -ENOMEM;
+       iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
+       if (iommu_wq == NULL)
+               goto out;
+
+       amd_iommu_register_ppr_notifier(&ppr_nb);
+
+       return 0;
+
+out:
+       return ret;
+}
+
+static void __exit amd_iommu_v2_exit(void)
+{
+       struct device_state *dev_state;
+       int i;
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       amd_iommu_unregister_ppr_notifier(&ppr_nb);
+
+       flush_workqueue(iommu_wq);
+
+       /*
+        * The loop below might call flush_workqueue(), so call
+        * destroy_workqueue() after it
+        */
+       for (i = 0; i < MAX_DEVICES; ++i) {
+               dev_state = get_device_state(i);
+
+               if (dev_state == NULL)
+                       continue;
+
+               WARN_ON_ONCE(1);
+
+               put_device_state(dev_state);
+               amd_iommu_free_device(dev_state->pdev);
+       }
+
+       destroy_workqueue(iommu_wq);
+}
+
+module_init(amd_iommu_v2_init);
+module_exit(amd_iommu_v2_exit);
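
For orientation, the exported v2 API above (amd_iommu_init_device(), amd_iommu_bind_pasid() and their teardown counterparts) is consumed by client drivers roughly as in the sketch below; the example_* names and the PASID count are hypothetical and error handling is abbreviated.

#include <linux/amd-iommu.h>
#include <linux/pci.h>
#include <linux/sched.h>

/* Hypothetical client: allow up to 16 PASIDs, then bind one of them to current. */
static int example_enable_pasid(struct pci_dev *pdev, int pasid)
{
        int ret;

        /* Allocates the per-device state and attaches the device to a v2 domain. */
        ret = amd_iommu_init_device(pdev, 16);
        if (ret)
                return ret;

        /* Faults tagged with this PASID are then resolved via current->mm. */
        ret = amd_iommu_bind_pasid(pdev, pasid, current);
        if (ret)
                amd_iommu_free_device(pdev);

        return ret;
}

static void example_disable_pasid(struct pci_dev *pdev, int pasid)
{
        amd_iommu_unbind_pasid(pdev, pasid);
        amd_iommu_free_device(pdev);
}
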
diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c
new file mode 100644 (file)
index 0000000..5120ce4
--- /dev/null
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Quirks for AMD IOMMU
+ *
+ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
+ */
+
+#ifdef CONFIG_DMI
+#include <linux/dmi.h>
+
+#include "amd_iommu.h"
+
+#define IVHD_SPECIAL_IOAPIC            1
+
+struct ivrs_quirk_entry {
+       u8 id;
+       u16 devid;
+};
+
+enum {
+       DELL_INSPIRON_7375 = 0,
+       DELL_LATITUDE_5495,
+       LENOVO_IDEAPAD_330S_15ARR,
+};
+
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
+       /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
+       [DELL_INSPIRON_7375] = {
+               { .id = 4, .devid = 0xa0 },
+               { .id = 5, .devid = 0x2 },
+               {}
+       },
+       /* ivrs_ioapic[4]=00:14.0 */
+       [DELL_LATITUDE_5495] = {
+               { .id = 4, .devid = 0xa0 },
+               {}
+       },
+       /* ivrs_ioapic[32]=00:14.0 */
+       [LENOVO_IDEAPAD_330S_15ARR] = {
+               { .id = 32, .devid = 0xa0 },
+               {}
+       },
+       {}
+};
+
+static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
+{
+       const struct ivrs_quirk_entry *i;
+
+       for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
+               add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+
+       return 0;
+}
+
+static const struct dmi_system_id ivrs_quirks[] __initconst = {
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Dell Inspiron 7375",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
+       },
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Dell Latitude 5495",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
+       {
+               /*
+                * Acer Aspire A315-41 requires the very same workaround as
+                * Dell Latitude 5495
+                */
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Acer Aspire A315-41",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Lenovo ideapad 330S-15ARR",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
+       },
+       {}
+};
+
+void __init amd_iommu_apply_ivrs_quirks(void)
+{
+       dmi_check_system(ivrs_quirks);
+}
+#endif
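
Each table above mirrors the ivrs_ioapic[] command-line override named in its comment; booting with ivrs_ioapic[32]=00:14.0, for example, has the same effect as the Lenovo entry. A new affected machine would follow the same pattern, roughly as sketched below; the vendor and product strings are invented purely for illustration.

/* ivrs_ioapic[4]=00:14.0 -- hypothetical machine needing the same override */
static const struct ivrs_quirk_entry example_ioapic_quirk[] __initconst = {
        { .id = 4, .devid = 0xa0 },
        {}
};

/* ...plus a matching element added to ivrs_quirks[] before its terminator: */
        {
                .callback = ivrs_ioapic_quirk_cb,
                .ident = "Example Vendor Example Board",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
                },
                .driver_data = (void *)example_ioapic_quirk,
        },
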
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
deleted file mode 100644 (file)
index 311ef71..0000000
+++ /dev/null
@@ -1,4041 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- *         Leo Duran <leo.duran@amd.com>
- */
-
-#define pr_fmt(fmt)     "AMD-Vi: " fmt
-#define dev_fmt(fmt)    pr_fmt(fmt)
-
-#include <linux/ratelimit.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_device.h>
-#include <linux/pci-ats.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
-#include <linux/iommu-helper.h>
-#include <linux/delay.h>
-#include <linux/amd-iommu.h>
-#include <linux/notifier.h>
-#include <linux/export.h>
-#include <linux/irq.h>
-#include <linux/msi.h>
-#include <linux/dma-contiguous.h>
-#include <linux/irqdomain.h>
-#include <linux/percpu.h>
-#include <linux/iova.h>
-#include <asm/irq_remapping.h>
-#include <asm/io_apic.h>
-#include <asm/apic.h>
-#include <asm/hw_irq.h>
-#include <asm/msidef.h>
-#include <asm/proto.h>
-#include <asm/iommu.h>
-#include <asm/gart.h>
-#include <asm/dma.h>
-
-#include "amd_iommu.h"
-#include "irq_remapping.h"
-
-#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-
-#define LOOP_TIMEOUT   100000
-
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN         (1)
-#define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
-
-/* Reserved IOVA ranges */
-#define MSI_RANGE_START                (0xfee00000)
-#define MSI_RANGE_END          (0xfeefffff)
-#define HT_RANGE_START         (0xfd00000000ULL)
-#define HT_RANGE_END           (0xffffffffffULL)
-
-/*
- * This bitmap is used to advertise the page sizes our hardware supports
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * 512GB Pages are not supported due to a hardware bug
- */
-#define AMD_IOMMU_PGSIZES      ((~0xFFFUL) & ~(2ULL << 38))
-
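
As an aside on AMD_IOMMU_PGSIZES: the IOMMU core reads bit n of this mask as "a page size of 2^n bytes is supported", so the constant above decomposes as in the comment below (a restatement for clarity, not new behaviour).

/*
 * ~0xFFFUL      -> bits 12..63 set: 4 KiB and every larger power-of-two size
 * ~(2ULL << 38) -> bit 39 cleared:  drops the 512 GiB page size (hardware bug)
 */
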
-#define DEFAULT_PGTABLE_LEVEL  PAGE_MODE_3_LEVEL
-
-static DEFINE_SPINLOCK(pd_bitmap_lock);
-
-/* List of all available dev_data structures */
-static LLIST_HEAD(dev_data_list);
-
-LIST_HEAD(ioapic_map);
-LIST_HEAD(hpet_map);
-LIST_HEAD(acpihid_map);
-
-/*
- * Domain for untranslated devices - only allocated
- * if iommu=pt passed on kernel cmd line.
- */
-const struct iommu_ops amd_iommu_ops;
-
-static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
-int amd_iommu_max_glx_val = -1;
-
-/*
- * general struct to manage commands sent to an IOMMU
- */
-struct iommu_cmd {
-       u32 data[4];
-};
-
-struct kmem_cache *amd_iommu_irq_cache;
-
-static void update_domain(struct protection_domain *domain);
-static void detach_device(struct device *dev);
-static void update_and_flush_device_table(struct protection_domain *domain,
-                                         struct domain_pgtable *pgtable);
-
-/****************************************************************************
- *
- * Helper functions
- *
- ****************************************************************************/
-
-static inline u16 get_pci_device_id(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       return pci_dev_id(pdev);
-}
-
-static inline int get_acpihid_device_id(struct device *dev,
-                                       struct acpihid_map_entry **entry)
-{
-       struct acpi_device *adev = ACPI_COMPANION(dev);
-       struct acpihid_map_entry *p;
-
-       if (!adev)
-               return -ENODEV;
-
-       list_for_each_entry(p, &acpihid_map, list) {
-               if (acpi_dev_hid_uid_match(adev, p->hid,
-                                          p->uid[0] ? p->uid : NULL)) {
-                       if (entry)
-                               *entry = p;
-                       return p->devid;
-               }
-       }
-       return -EINVAL;
-}
-
-static inline int get_device_id(struct device *dev)
-{
-       int devid;
-
-       if (dev_is_pci(dev))
-               devid = get_pci_device_id(dev);
-       else
-               devid = get_acpihid_device_id(dev, NULL);
-
-       return devid;
-}
-
-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
-{
-       return container_of(dom, struct protection_domain, domain);
-}
-
-static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
-                                        struct domain_pgtable *pgtable)
-{
-       u64 pt_root = atomic64_read(&domain->pt_root);
-
-       pgtable->root = (u64 *)(pt_root & PAGE_MASK);
-       pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
-}
-
-static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
-{
-       u64 pt_root;
-
-       /* lowest 3 bits encode pgtable mode */
-       pt_root = mode & 7;
-       pt_root |= (u64)root;
-
-       return pt_root;
-}
-
-static struct iommu_dev_data *alloc_dev_data(u16 devid)
-{
-       struct iommu_dev_data *dev_data;
-
-       dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
-       if (!dev_data)
-               return NULL;
-
-       spin_lock_init(&dev_data->lock);
-       dev_data->devid = devid;
-       ratelimit_default_init(&dev_data->rs);
-
-       llist_add(&dev_data->dev_data_list, &dev_data_list);
-       return dev_data;
-}
-
-static struct iommu_dev_data *search_dev_data(u16 devid)
-{
-       struct iommu_dev_data *dev_data;
-       struct llist_node *node;
-
-       if (llist_empty(&dev_data_list))
-               return NULL;
-
-       node = dev_data_list.first;
-       llist_for_each_entry(dev_data, node, dev_data_list) {
-               if (dev_data->devid == devid)
-                       return dev_data;
-       }
-
-       return NULL;
-}
-
-static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
-       u16 devid = pci_dev_id(pdev);
-
-       if (devid == alias)
-               return 0;
-
-       amd_iommu_rlookup_table[alias] =
-               amd_iommu_rlookup_table[devid];
-       memcpy(amd_iommu_dev_table[alias].data,
-              amd_iommu_dev_table[devid].data,
-              sizeof(amd_iommu_dev_table[alias].data));
-
-       return 0;
-}
-
-static void clone_aliases(struct pci_dev *pdev)
-{
-       if (!pdev)
-               return;
-
-       /*
-        * The IVRS alias stored in the alias table may not be
-        * part of the PCI DMA aliases if its bus differs
-        * from the original device.
-        */
-       clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
-
-       pci_for_each_dma_alias(pdev, clone_alias, NULL);
-}
-
-static struct pci_dev *setup_aliases(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       u16 ivrs_alias;
-
-       /* For ACPI HID devices, there are no aliases */
-       if (!dev_is_pci(dev))
-               return NULL;
-
-       /*
-        * Add the IVRS alias to the pci aliases if it is on the same
-        * bus. The IVRS table may know about a quirk that we don't.
-        */
-       ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
-       if (ivrs_alias != pci_dev_id(pdev) &&
-           PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
-               pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
-
-       clone_aliases(pdev);
-
-       return pdev;
-}
-
-static struct iommu_dev_data *find_dev_data(u16 devid)
-{
-       struct iommu_dev_data *dev_data;
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
-       dev_data = search_dev_data(devid);
-
-       if (dev_data == NULL) {
-               dev_data = alloc_dev_data(devid);
-               if (!dev_data)
-                       return NULL;
-
-               if (translation_pre_enabled(iommu))
-                       dev_data->defer_attach = true;
-       }
-
-       return dev_data;
-}
-
-/*
-* Find or create an IOMMU group for an acpihid device.
-*/
-static struct iommu_group *acpihid_device_group(struct device *dev)
-{
-       struct acpihid_map_entry *p, *entry = NULL;
-       int devid;
-
-       devid = get_acpihid_device_id(dev, &entry);
-       if (devid < 0)
-               return ERR_PTR(devid);
-
-       list_for_each_entry(p, &acpihid_map, list) {
-               if ((devid == p->devid) && p->group)
-                       entry->group = p->group;
-       }
-
-       if (!entry->group)
-               entry->group = generic_device_group(dev);
-       else
-               iommu_group_ref_get(entry->group);
-
-       return entry->group;
-}
-
-static bool pci_iommuv2_capable(struct pci_dev *pdev)
-{
-       static const int caps[] = {
-               PCI_EXT_CAP_ID_PRI,
-               PCI_EXT_CAP_ID_PASID,
-       };
-       int i, pos;
-
-       if (!pci_ats_supported(pdev))
-               return false;
-
-       for (i = 0; i < 2; ++i) {
-               pos = pci_find_ext_capability(pdev, caps[i]);
-               if (pos == 0)
-                       return false;
-       }
-
-       return true;
-}
-
-static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
-{
-       struct iommu_dev_data *dev_data;
-
-       dev_data = dev_iommu_priv_get(&pdev->dev);
-
-       return dev_data->errata & (1 << erratum) ? true : false;
-}
-
-/*
- * This function checks if the driver got a valid device from the caller to
- * avoid dereferencing invalid pointers.
- */
-static bool check_device(struct device *dev)
-{
-       int devid;
-
-       if (!dev)
-               return false;
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return false;
-
-       /* Out of our scope? */
-       if (devid > amd_iommu_last_bdf)
-               return false;
-
-       if (amd_iommu_rlookup_table[devid] == NULL)
-               return false;
-
-       return true;
-}
-
-static int iommu_init_device(struct device *dev)
-{
-       struct iommu_dev_data *dev_data;
-       int devid;
-
-       if (dev_iommu_priv_get(dev))
-               return 0;
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return devid;
-
-       dev_data = find_dev_data(devid);
-       if (!dev_data)
-               return -ENOMEM;
-
-       dev_data->pdev = setup_aliases(dev);
-
-       /*
-        * By default we use passthrough mode for IOMMUv2 capable devices.
-        * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
-        * invalid address), we ignore the capability for the device so
-        * it'll be forced to go into translation mode.
-        */
-       if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
-           dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
-               struct amd_iommu *iommu;
-
-               iommu = amd_iommu_rlookup_table[dev_data->devid];
-               dev_data->iommu_v2 = iommu->is_iommu_v2;
-       }
-
-       dev_iommu_priv_set(dev, dev_data);
-
-       return 0;
-}
-
-static void iommu_ignore_device(struct device *dev)
-{
-       int devid;
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return;
-
-       amd_iommu_rlookup_table[devid] = NULL;
-       memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
-
-       setup_aliases(dev);
-}
-
-static void amd_iommu_uninit_device(struct device *dev)
-{
-       struct iommu_dev_data *dev_data;
-
-       dev_data = dev_iommu_priv_get(dev);
-       if (!dev_data)
-               return;
-
-       if (dev_data->domain)
-               detach_device(dev);
-
-       dev_iommu_priv_set(dev, NULL);
-
-       /*
-        * We keep dev_data around for unplugged devices and reuse it when the
-        * device is re-plugged - not doing so would introduce a ton of races.
-        */
-}
-
-/*
- * Helper function to get the first pte of a large mapping
- */
-static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
-                        unsigned long *count)
-{
-       unsigned long pte_mask, pg_size, cnt;
-       u64 *fpte;
-
-       pg_size  = PTE_PAGE_SIZE(*pte);
-       cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
-       pte_mask = ~((cnt << 3) - 1);
-       fpte     = (u64 *)(((unsigned long)pte) & pte_mask);
-
-       if (page_size)
-               *page_size = pg_size;
-
-       if (count)
-               *count = cnt;
-
-       return fpte;
-}
-
-/****************************************************************************
- *
- * Interrupt handling functions
- *
- ****************************************************************************/
-
-static void dump_dte_entry(u16 devid)
-{
-       int i;
-
-       for (i = 0; i < 4; ++i)
-               pr_err("DTE[%d]: %016llx\n", i,
-                       amd_iommu_dev_table[devid].data[i]);
-}
-
-static void dump_command(unsigned long phys_addr)
-{
-       struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
-       int i;
-
-       for (i = 0; i < 4; ++i)
-               pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
-}
-
-static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
-                                       u64 address, int flags)
-{
-       struct iommu_dev_data *dev_data = NULL;
-       struct pci_dev *pdev;
-
-       pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
-                                          devid & 0xff);
-       if (pdev)
-               dev_data = dev_iommu_priv_get(&pdev->dev);
-
-       if (dev_data && __ratelimit(&dev_data->rs)) {
-               pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
-                       domain_id, address, flags);
-       } else if (printk_ratelimit()) {
-               pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       domain_id, address, flags);
-       }
-
-       if (pdev)
-               pci_dev_put(pdev);
-}
-
-static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
-{
-       struct device *dev = iommu->iommu.dev;
-       int type, devid, pasid, flags, tag;
-       volatile u32 *event = __evt;
-       int count = 0;
-       u64 address;
-
-retry:
-       type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
-       devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-       pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
-                 (event[1] & EVENT_DOMID_MASK_LO);
-       flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
-       address = (u64)(((u64)event[3]) << 32) | event[2];
-
-       if (type == 0) {
-               /* Did we hit the erratum? */
-               if (++count == LOOP_TIMEOUT) {
-                       pr_err("No event written to event log\n");
-                       return;
-               }
-               udelay(1);
-               goto retry;
-       }
-
-       if (type == EVENT_TYPE_IO_FAULT) {
-               amd_iommu_report_page_fault(devid, pasid, address, flags);
-               return;
-       }
-
-       switch (type) {
-       case EVENT_TYPE_ILL_DEV:
-               dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       pasid, address, flags);
-               dump_dte_entry(devid);
-               break;
-       case EVENT_TYPE_DEV_TAB_ERR:
-               dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
-                       "address=0x%llx flags=0x%04x]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       address, flags);
-               break;
-       case EVENT_TYPE_PAGE_TAB_ERR:
-               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       pasid, address, flags);
-               break;
-       case EVENT_TYPE_ILL_CMD:
-               dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
-               dump_command(address);
-               break;
-       case EVENT_TYPE_CMD_HARD_ERR:
-               dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
-                       address, flags);
-               break;
-       case EVENT_TYPE_IOTLB_INV_TO:
-               dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       address);
-               break;
-       case EVENT_TYPE_INV_DEV_REQ:
-               dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       pasid, address, flags);
-               break;
-       case EVENT_TYPE_INV_PPR_REQ:
-               pasid = PPR_PASID(*((u64 *)__evt));
-               tag = event[1] & 0x03FF;
-               dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
-                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       pasid, address, flags, tag);
-               break;
-       default:
-               dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
-                       event[0], event[1], event[2], event[3]);
-       }
-
-       memset(__evt, 0, 4 * sizeof(u32));
-}
-
-static void iommu_poll_events(struct amd_iommu *iommu)
-{
-       u32 head, tail;
-
-       head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-       tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
-
-       while (head != tail) {
-               iommu_print_event(iommu, iommu->evt_buf + head);
-               head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
-       }
-
-       writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-}
-
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
-{
-       struct amd_iommu_fault fault;
-
-       if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
-               pr_err_ratelimited("Unknown PPR request received\n");
-               return;
-       }
-
-       fault.address   = raw[1];
-       fault.pasid     = PPR_PASID(raw[0]);
-       fault.device_id = PPR_DEVID(raw[0]);
-       fault.tag       = PPR_TAG(raw[0]);
-       fault.flags     = PPR_FLAGS(raw[0]);
-
-       atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
-}
-
-static void iommu_poll_ppr_log(struct amd_iommu *iommu)
-{
-       u32 head, tail;
-
-       if (iommu->ppr_log == NULL)
-               return;
-
-       head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-       tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
-       while (head != tail) {
-               volatile u64 *raw;
-               u64 entry[2];
-               int i;
-
-               raw = (u64 *)(iommu->ppr_log + head);
-
-               /*
-                * Hardware bug: Interrupt may arrive before the entry is
-                * written to memory. If this happens we need to wait for the
-                * entry to arrive.
-                */
-               for (i = 0; i < LOOP_TIMEOUT; ++i) {
-                       if (PPR_REQ_TYPE(raw[0]) != 0)
-                               break;
-                       udelay(1);
-               }
-
-               /* Avoid memcpy function-call overhead */
-               entry[0] = raw[0];
-               entry[1] = raw[1];
-
-               /*
-                * To detect the hardware bug we need to clear the entry
-                * back to zero.
-                */
-               raw[0] = raw[1] = 0UL;
-
-               /* Update head pointer of hardware ring-buffer */
-               head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
-               writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-
-               /* Handle PPR entry */
-               iommu_handle_ppr_entry(iommu, entry);
-
-               /* Refresh ring-buffer information */
-               head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-               tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-       }
-}
-
-#ifdef CONFIG_IRQ_REMAP
-static int (*iommu_ga_log_notifier)(u32);
-
-int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
-{
-       iommu_ga_log_notifier = notifier;
-
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
-
-static void iommu_poll_ga_log(struct amd_iommu *iommu)
-{
-       u32 head, tail, cnt = 0;
-
-       if (iommu->ga_log == NULL)
-               return;
-
-       head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
-       tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
-
-       while (head != tail) {
-               volatile u64 *raw;
-               u64 log_entry;
-
-               raw = (u64 *)(iommu->ga_log + head);
-               cnt++;
-
-               /* Avoid memcpy function-call overhead */
-               log_entry = *raw;
-
-               /* Update head pointer of hardware ring-buffer */
-               head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
-               writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
-
-               /* Handle GA entry */
-               switch (GA_REQ_TYPE(log_entry)) {
-               case GA_GUEST_NR:
-                       if (!iommu_ga_log_notifier)
-                               break;
-
-                       pr_debug("%s: devid=%#x, ga_tag=%#x\n",
-                                __func__, GA_DEVID(log_entry),
-                                GA_TAG(log_entry));
-
-                       if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
-                               pr_err("GA log notifier failed.\n");
-                       break;
-               default:
-                       break;
-               }
-       }
-}
-#endif /* CONFIG_IRQ_REMAP */
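
The amd_iommu_register_ga_log_notifier() hook above is how a consumer (KVM's AVIC support in the kernel tree) learns which guest a GA log entry targets. A minimal hypothetical registration, with invented example_* names, would look like this:

#include <linux/amd-iommu.h>
#include <linux/printk.h>

/* Hypothetical consumer: invoked with the GA tag of each GA log entry. */
static int example_ga_log_handler(u32 ga_tag)
{
        pr_debug("GA log event for tag %#x\n", ga_tag);
        return 0;
}

static void example_register_ga_handler(void)
{
        amd_iommu_register_ga_log_notifier(example_ga_log_handler);
}
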
-
-#define AMD_IOMMU_INT_MASK     \
-       (MMIO_STATUS_EVT_INT_MASK | \
-        MMIO_STATUS_PPR_INT_MASK | \
-        MMIO_STATUS_GALOG_INT_MASK)
-
-irqreturn_t amd_iommu_int_thread(int irq, void *data)
-{
-       struct amd_iommu *iommu = (struct amd_iommu *) data;
-       u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-       while (status & AMD_IOMMU_INT_MASK) {
-               /* Enable EVT and PPR and GA interrupts again */
-               writel(AMD_IOMMU_INT_MASK,
-                       iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-               if (status & MMIO_STATUS_EVT_INT_MASK) {
-                       pr_devel("Processing IOMMU Event Log\n");
-                       iommu_poll_events(iommu);
-               }
-
-               if (status & MMIO_STATUS_PPR_INT_MASK) {
-                       pr_devel("Processing IOMMU PPR Log\n");
-                       iommu_poll_ppr_log(iommu);
-               }
-
-#ifdef CONFIG_IRQ_REMAP
-               if (status & MMIO_STATUS_GALOG_INT_MASK) {
-                       pr_devel("Processing IOMMU GA Log\n");
-                       iommu_poll_ga_log(iommu);
-               }
-#endif
-
-               /*
-                * Hardware bug: ERBT1312
-                * When re-enabling interrupt (by writing 1
-                * to clear the bit), the hardware might also try to set
-                * the interrupt bit in the event status register.
-                * In this scenario, the bit will be set and will disable
-                * subsequent interrupts.
-                *
-                * Workaround: The IOMMU driver should read back the
-                * status register and check if the interrupt bits are cleared.
-                * If not, the driver will need to go through the interrupt handler
-                * again and re-clear the bits.
-                */
-               status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-       }
-       return IRQ_HANDLED;
-}
-
-irqreturn_t amd_iommu_int_handler(int irq, void *data)
-{
-       return IRQ_WAKE_THREAD;
-}
-
-/****************************************************************************
- *
- * IOMMU command queuing functions
- *
- ****************************************************************************/
-
-static int wait_on_sem(volatile u64 *sem)
-{
-       int i = 0;
-
-       while (*sem == 0 && i < LOOP_TIMEOUT) {
-               udelay(1);
-               i += 1;
-       }
-
-       if (i == LOOP_TIMEOUT) {
-               pr_alert("Completion-Wait loop timed out\n");
-               return -EIO;
-       }
-
-       return 0;
-}
-
-static void copy_cmd_to_buffer(struct amd_iommu *iommu,
-                              struct iommu_cmd *cmd)
-{
-       u8 *target;
-       u32 tail;
-
-       /* Copy command to buffer */
-       tail = iommu->cmd_buf_tail;
-       target = iommu->cmd_buf + tail;
-       memcpy(target, cmd, sizeof(*cmd));
-
-       tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
-       iommu->cmd_buf_tail = tail;
-
-       /* Tell the IOMMU about it */
-       writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-}
-
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
-{
-       u64 paddr = iommu_virt_to_phys((void *)address);
-
-       WARN_ON(address & 0x7ULL);
-
-       memset(cmd, 0, sizeof(*cmd));
-       cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
-       cmd->data[1] = upper_32_bits(paddr);
-       cmd->data[2] = 1;
-       CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
-}
-
-static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
-{
-       memset(cmd, 0, sizeof(*cmd));
-       cmd->data[0] = devid;
-       CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
-}
-
-static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
-                                 size_t size, u16 domid, int pde)
-{
-       u64 pages;
-       bool s;
-
-       pages = iommu_num_pages(address, size, PAGE_SIZE);
-       s     = false;
-
-       if (pages > 1) {
-               /*
-                * If we have to flush more than one page, flush all
-                * TLB entries for this domain
-                */
-               address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-               s = true;
-       }
-
-       address &= PAGE_MASK;
-
-       memset(cmd, 0, sizeof(*cmd));
-       cmd->data[1] |= domid;
-       cmd->data[2]  = lower_32_bits(address);
-       cmd->data[3]  = upper_32_bits(address);
-       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-       if (s) /* size bit - we flush more than one 4kb page */
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-       if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
-}
-
-static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
-                                 u64 address, size_t size)
-{
-       u64 pages;
-       bool s;
-
-       pages = iommu_num_pages(address, size, PAGE_SIZE);
-       s     = false;
-
-       if (pages > 1) {
-               /*
-                * If we have to flush more than one page, flush all
-                * TLB entries for this domain
-                */
-               address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-               s = true;
-       }
-
-       address &= PAGE_MASK;
-
-       memset(cmd, 0, sizeof(*cmd));
-       cmd->data[0]  = devid;
-       cmd->data[0] |= (qdep & 0xff) << 24;
-       cmd->data[1]  = devid;
-       cmd->data[2]  = lower_32_bits(address);
-       cmd->data[3]  = upper_32_bits(address);
-       CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-       if (s)
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-}
-
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
-                                 u64 address, bool size)
-{
-       memset(cmd, 0, sizeof(*cmd));
-
-       address &= ~(0xfffULL);
-
-       cmd->data[0]  = pasid;
-       cmd->data[1]  = domid;
-       cmd->data[2]  = lower_32_bits(address);
-       cmd->data[3]  = upper_32_bits(address);
-       cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
-       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
-       if (size)
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-}
-
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
-                                 int qdep, u64 address, bool size)
-{
-       memset(cmd, 0, sizeof(*cmd));
-
-       address &= ~(0xfffULL);
-
-       cmd->data[0]  = devid;
-       cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
-       cmd->data[0] |= (qdep  & 0xff) << 24;
-       cmd->data[1]  = devid;
-       cmd->data[1] |= (pasid & 0xff) << 16;
-       cmd->data[2]  = lower_32_bits(address);
-       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
-       cmd->data[3]  = upper_32_bits(address);
-       if (size)
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-       CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-}
-
-static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
-                              int status, int tag, bool gn)
-{
-       memset(cmd, 0, sizeof(*cmd));
-
-       cmd->data[0]  = devid;
-       if (gn) {
-               cmd->data[1]  = pasid;
-               cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
-       }
-       cmd->data[3]  = tag & 0x1ff;
-       cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
-
-       CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
-}
-
-static void build_inv_all(struct iommu_cmd *cmd)
-{
-       memset(cmd, 0, sizeof(*cmd));
-       CMD_SET_TYPE(cmd, CMD_INV_ALL);
-}
-
-static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
-{
-       memset(cmd, 0, sizeof(*cmd));
-       cmd->data[0] = devid;
-       CMD_SET_TYPE(cmd, CMD_INV_IRT);
-}
-
-/*
- * Writes the command to the IOMMU's command buffer and informs the
- * hardware about the new command.
- */
-static int __iommu_queue_command_sync(struct amd_iommu *iommu,
-                                     struct iommu_cmd *cmd,
-                                     bool sync)
-{
-       unsigned int count = 0;
-       u32 left, next_tail;
-
-       next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
-again:
-       left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
-
-       if (left <= 0x20) {
-               /* Skip udelay() the first time around */
-               if (count++) {
-                       if (count == LOOP_TIMEOUT) {
-                               pr_err("Command buffer timeout\n");
-                               return -EIO;
-                       }
-
-                       udelay(1);
-               }
-
-               /* Update head and recheck remaining space */
-               iommu->cmd_buf_head = readl(iommu->mmio_base +
-                                           MMIO_CMD_HEAD_OFFSET);
-
-               goto again;
-       }
-
-       copy_cmd_to_buffer(iommu, cmd);
-
-       /* Do we need to make sure all commands are processed? */
-       iommu->need_sync = sync;
-
-       return 0;
-}
-
-static int iommu_queue_command_sync(struct amd_iommu *iommu,
-                                   struct iommu_cmd *cmd,
-                                   bool sync)
-{
-       unsigned long flags;
-       int ret;
-
-       raw_spin_lock_irqsave(&iommu->lock, flags);
-       ret = __iommu_queue_command_sync(iommu, cmd, sync);
-       raw_spin_unlock_irqrestore(&iommu->lock, flags);
-
-       return ret;
-}
-
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
-{
-       return iommu_queue_command_sync(iommu, cmd, true);
-}
-
-/*
- * This function queues a completion wait command into the command
- * buffer of an IOMMU
- */
-static int iommu_completion_wait(struct amd_iommu *iommu)
-{
-       struct iommu_cmd cmd;
-       unsigned long flags;
-       int ret;
-
-       if (!iommu->need_sync)
-               return 0;
-
-
-       build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
-       raw_spin_lock_irqsave(&iommu->lock, flags);
-
-       iommu->cmd_sem = 0;
-
-       ret = __iommu_queue_command_sync(iommu, &cmd, false);
-       if (ret)
-               goto out_unlock;
-
-       ret = wait_on_sem(&iommu->cmd_sem);
-
-out_unlock:
-       raw_spin_unlock_irqrestore(&iommu->lock, flags);
-
-       return ret;
-}
-
-static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
-{
-       struct iommu_cmd cmd;
-
-       build_inv_dte(&cmd, devid);
-
-       return iommu_queue_command(iommu, &cmd);
-}
-
-static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
-{
-       u32 devid;
-
-       for (devid = 0; devid <= 0xffff; ++devid)
-               iommu_flush_dte(iommu, devid);
-
-       iommu_completion_wait(iommu);
-}
-
-/*
- * This function uses heavy locking and may disable irqs for some time. But
- * this is no issue because it is only called during resume.
- */
-static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
-{
-       u32 dom_id;
-
-       for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
-               struct iommu_cmd cmd;
-               build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                                     dom_id, 1);
-               iommu_queue_command(iommu, &cmd);
-       }
-
-       iommu_completion_wait(iommu);
-}
-
-static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
-{
-       struct iommu_cmd cmd;
-
-       build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                             dom_id, 1);
-       iommu_queue_command(iommu, &cmd);
-
-       iommu_completion_wait(iommu);
-}
-
-static void amd_iommu_flush_all(struct amd_iommu *iommu)
-{
-       struct iommu_cmd cmd;
-
-       build_inv_all(&cmd);
-
-       iommu_queue_command(iommu, &cmd);
-       iommu_completion_wait(iommu);
-}
-
-static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
-{
-       struct iommu_cmd cmd;
-
-       build_inv_irt(&cmd, devid);
-
-       iommu_queue_command(iommu, &cmd);
-}
-
-static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
-{
-       u32 devid;
-
-       for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
-               iommu_flush_irt(iommu, devid);
-
-       iommu_completion_wait(iommu);
-}
-
-void iommu_flush_all_caches(struct amd_iommu *iommu)
-{
-       if (iommu_feature(iommu, FEATURE_IA)) {
-               amd_iommu_flush_all(iommu);
-       } else {
-               amd_iommu_flush_dte_all(iommu);
-               amd_iommu_flush_irt_all(iommu);
-               amd_iommu_flush_tlb_all(iommu);
-       }
-}
-
-/*
- * Command send function for flushing on-device TLB
- */
-static int device_flush_iotlb(struct iommu_dev_data *dev_data,
-                             u64 address, size_t size)
-{
-       struct amd_iommu *iommu;
-       struct iommu_cmd cmd;
-       int qdep;
-
-       qdep     = dev_data->ats.qdep;
-       iommu    = amd_iommu_rlookup_table[dev_data->devid];
-
-       build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
-
-       return iommu_queue_command(iommu, &cmd);
-}
-
-static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
-       struct amd_iommu *iommu = data;
-
-       return iommu_flush_dte(iommu, alias);
-}
-
-/*
- * Command send function for invalidating a device table entry
- */
-static int device_flush_dte(struct iommu_dev_data *dev_data)
-{
-       struct amd_iommu *iommu;
-       u16 alias;
-       int ret;
-
-       iommu = amd_iommu_rlookup_table[dev_data->devid];
-
-       if (dev_data->pdev)
-               ret = pci_for_each_dma_alias(dev_data->pdev,
-                                            device_flush_dte_alias, iommu);
-       else
-               ret = iommu_flush_dte(iommu, dev_data->devid);
-       if (ret)
-               return ret;
-
-       alias = amd_iommu_alias_table[dev_data->devid];
-       if (alias != dev_data->devid) {
-               ret = iommu_flush_dte(iommu, alias);
-               if (ret)
-                       return ret;
-       }
-
-       if (dev_data->ats.enabled)
-               ret = device_flush_iotlb(dev_data, 0, ~0UL);
-
-       return ret;
-}
-
-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
-                                u64 address, size_t size, int pde)
-{
-       struct iommu_dev_data *dev_data;
-       struct iommu_cmd cmd;
-       int ret = 0, i;
-
-       build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
-
-       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-               if (!domain->dev_iommu[i])
-                       continue;
-
-               /*
-                * Devices of this domain are behind this IOMMU
-                * We need a TLB flush
-                */
-               ret |= iommu_queue_command(amd_iommus[i], &cmd);
-       }
-
-       list_for_each_entry(dev_data, &domain->dev_list, list) {
-
-               if (!dev_data->ats.enabled)
-                       continue;
-
-               ret |= device_flush_iotlb(dev_data, address, size);
-       }
-
-       WARN_ON(ret);
-}
-
-static void domain_flush_pages(struct protection_domain *domain,
-                              u64 address, size_t size)
-{
-       __domain_flush_pages(domain, address, size, 0);
-}
-
-/* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void domain_flush_tlb_pde(struct protection_domain *domain)
-{
-       __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
-}
-
-static void domain_flush_complete(struct protection_domain *domain)
-{
-       int i;
-
-       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-               if (domain && !domain->dev_iommu[i])
-                       continue;
-
-               /*
-                * Devices of this domain are behind this IOMMU
-                * We need to wait for completion of all commands.
-                */
-               iommu_completion_wait(amd_iommus[i]);
-       }
-}
-
-/* Flush the not present cache if it exists */
-static void domain_flush_np_cache(struct protection_domain *domain,
-               dma_addr_t iova, size_t size)
-{
-       if (unlikely(amd_iommu_np_cache)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&domain->lock, flags);
-               domain_flush_pages(domain, iova, size);
-               domain_flush_complete(domain);
-               spin_unlock_irqrestore(&domain->lock, flags);
-       }
-}
-
-
-/*
- * This function flushes the DTEs for all devices in the domain
- */
-static void domain_flush_devices(struct protection_domain *domain)
-{
-       struct iommu_dev_data *dev_data;
-
-       list_for_each_entry(dev_data, &domain->dev_list, list)
-               device_flush_dte(dev_data);
-}
-
-/****************************************************************************
- *
- * The functions below are used to create the page table mappings for
- * unity mapped regions.
- *
- ****************************************************************************/
-
-static void free_page_list(struct page *freelist)
-{
-       while (freelist != NULL) {
-               unsigned long p = (unsigned long)page_address(freelist);
-               freelist = freelist->freelist;
-               free_page(p);
-       }
-}
-
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
-{
-       struct page *p = virt_to_page((void *)pt);
-
-       p->freelist = freelist;
-
-       return p;
-}
-
-#define DEFINE_FREE_PT_FN(LVL, FN)                                             \
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist)  \
-{                                                                              \
-       unsigned long p;                                                        \
-       u64 *pt;                                                                \
-       int i;                                                                  \
-                                                                               \
-       pt = (u64 *)__pt;                                                       \
-                                                                               \
-       for (i = 0; i < 512; ++i) {                                             \
-               /* PTE present? */                                              \
-               if (!IOMMU_PTE_PRESENT(pt[i]))                                  \
-                       continue;                                               \
-                                                                               \
-               /* Large PTE? */                                                \
-               if (PM_PTE_LEVEL(pt[i]) == 0 ||                                 \
-                   PM_PTE_LEVEL(pt[i]) == 7)                                   \
-                       continue;                                               \
-                                                                               \
-               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);                       \
-               freelist = FN(p, freelist);                                     \
-       }                                                                       \
-                                                                               \
-       return free_pt_page((unsigned long)pt, freelist);                       \
-}
-
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
-
-static struct page *free_sub_pt(unsigned long root, int mode,
-                               struct page *freelist)
-{
-       switch (mode) {
-       case PAGE_MODE_NONE:
-       case PAGE_MODE_7_LEVEL:
-               break;
-       case PAGE_MODE_1_LEVEL:
-               freelist = free_pt_page(root, freelist);
-               break;
-       case PAGE_MODE_2_LEVEL:
-               freelist = free_pt_l2(root, freelist);
-               break;
-       case PAGE_MODE_3_LEVEL:
-               freelist = free_pt_l3(root, freelist);
-               break;
-       case PAGE_MODE_4_LEVEL:
-               freelist = free_pt_l4(root, freelist);
-               break;
-       case PAGE_MODE_5_LEVEL:
-               freelist = free_pt_l5(root, freelist);
-               break;
-       case PAGE_MODE_6_LEVEL:
-               freelist = free_pt_l6(root, freelist);
-               break;
-       default:
-               BUG();
-       }
-
-       return freelist;
-}
-
-static void free_pagetable(struct domain_pgtable *pgtable)
-{
-       struct page *freelist = NULL;
-       unsigned long root;
-
-       if (pgtable->mode == PAGE_MODE_NONE)
-               return;
-
-       BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
-              pgtable->mode > PAGE_MODE_6_LEVEL);
-
-       root = (unsigned long)pgtable->root;
-       freelist = free_sub_pt(root, pgtable->mode, freelist);
-
-       free_page_list(freelist);
-}
-
-/*
- * This function is used to add another level to an IO page table. Adding
- * another level increases the size of the address space by 9 bits to a size up
- * to 64 bits.
- */
-static bool increase_address_space(struct protection_domain *domain,
-                                  unsigned long address,
-                                  gfp_t gfp)
-{
-       struct domain_pgtable pgtable;
-       unsigned long flags;
-       bool ret = true;
-       u64 *pte, root;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-
-       if (address <= PM_LEVEL_SIZE(pgtable.mode))
-               goto out;
-
-       ret = false;
-       if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
-               goto out;
-
-       pte = (void *)get_zeroed_page(gfp);
-       if (!pte)
-               goto out;
-
-       *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
-
-       pgtable.root  = pte;
-       pgtable.mode += 1;
-       update_and_flush_device_table(domain, &pgtable);
-       domain_flush_complete(domain);
-
-       /*
-        * Device Table needs to be updated and flushed before the new root can
-        * be published.
-        */
-       root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
-       atomic64_set(&domain->pt_root, root);
-
-       ret = true;
-
-out:
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
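
For a rough sense of the numbers: each page-table level translates 9 address bits on top of the 12-bit page offset, so bumping the mode by one multiplies the addressable range by 512. The standalone snippet below just prints those sizes; it is a worked example, not driver code.

#include <stdio.h>

int main(void)
{
        /*
         * Bits covered by an IO page table of the given mode:
         * 12 offset bits plus 9 translated bits per level.
         */
        for (int mode = 1; mode <= 6; mode++)
                printf("mode %d covers %d address bits\n", mode, 12 + 9 * mode);

        /*
         * mode 3 -> 39 bits, mode 4 -> 48 bits, mode 6 -> 66 bits, i.e. mode 6
         * already exceeds a 64-bit input address, which is why the code above
         * refuses to grow past PAGE_MODE_6_LEVEL.
         */
        return 0;
}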
-
-static u64 *alloc_pte(struct protection_domain *domain,
-                     unsigned long address,
-                     unsigned long page_size,
-                     u64 **pte_page,
-                     gfp_t gfp,
-                     bool *updated)
-{
-       struct domain_pgtable pgtable;
-       int level, end_lvl;
-       u64 *pte, *page;
-
-       BUG_ON(!is_power_of_2(page_size));
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-
-       while (address > PM_LEVEL_SIZE(pgtable.mode)) {
-               /*
-                * Return an error if there is no memory to update the
-                * page-table.
-                */
-               if (!increase_address_space(domain, address, gfp))
-                       return NULL;
-
-               /* Read new values to check if update was successful */
-               amd_iommu_domain_get_pgtable(domain, &pgtable);
-       }
-
-       level   = pgtable.mode - 1;
-       pte     = &pgtable.root[PM_LEVEL_INDEX(level, address)];
-       address = PAGE_SIZE_ALIGN(address, page_size);
-       end_lvl = PAGE_SIZE_LEVEL(page_size);
-
-       while (level > end_lvl) {
-               u64 __pte, __npte;
-               int pte_level;
-
-               __pte     = *pte;
-               pte_level = PM_PTE_LEVEL(__pte);
-
-               /*
-                * If we replace a series of large PTEs, we need
-                * to tear down all of them.
-                */
-               if (IOMMU_PTE_PRESENT(__pte) &&
-                   pte_level == PAGE_MODE_7_LEVEL) {
-                       unsigned long count, i;
-                       u64 *lpte;
-
-                       lpte = first_pte_l7(pte, NULL, &count);
-
-                       /*
-                        * Unmap the replicated PTEs that still match the
-                        * original large mapping
-                        */
-                       for (i = 0; i < count; ++i)
-                               cmpxchg64(&lpte[i], __pte, 0ULL);
-
-                       *updated = true;
-                       continue;
-               }
-
-               if (!IOMMU_PTE_PRESENT(__pte) ||
-                   pte_level == PAGE_MODE_NONE) {
-                       page = (u64 *)get_zeroed_page(gfp);
-
-                       if (!page)
-                               return NULL;
-
-                       __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
-
-                       /* pte could have been changed somewhere. */
-                       if (cmpxchg64(pte, __pte, __npte) != __pte)
-                               free_page((unsigned long)page);
-                       else if (IOMMU_PTE_PRESENT(__pte))
-                               *updated = true;
-
-                       continue;
-               }
-
-               /* No level skipping support yet */
-               if (pte_level != level)
-                       return NULL;
-
-               level -= 1;
-
-               pte = IOMMU_PTE_PAGE(__pte);
-
-               if (pte_page && level == end_lvl)
-                       *pte_page = pte;
-
-               pte = &pte[PM_LEVEL_INDEX(level, address)];
-       }
-
-       return pte;
-}
-
-/*
- * This function checks if there is a PTE for a given DMA address. If
- * there is one, it returns a pointer to it.
- */
-static u64 *fetch_pte(struct protection_domain *domain,
-                     unsigned long address,
-                     unsigned long *page_size)
-{
-       struct domain_pgtable pgtable;
-       int level;
-       u64 *pte;
-
-       *page_size = 0;
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-
-       if (address > PM_LEVEL_SIZE(pgtable.mode))
-               return NULL;
-
-       level      =  pgtable.mode - 1;
-       pte        = &pgtable.root[PM_LEVEL_INDEX(level, address)];
-       *page_size =  PTE_LEVEL_PAGE_SIZE(level);
-
-       while (level > 0) {
-
-               /* Not Present */
-               if (!IOMMU_PTE_PRESENT(*pte))
-                       return NULL;
-
-               /* Large PTE */
-               if (PM_PTE_LEVEL(*pte) == 7 ||
-                   PM_PTE_LEVEL(*pte) == 0)
-                       break;
-
-               /* No level skipping support yet */
-               if (PM_PTE_LEVEL(*pte) != level)
-                       return NULL;
-
-               level -= 1;
-
-               /* Walk to the next level */
-               pte        = IOMMU_PTE_PAGE(*pte);
-               pte        = &pte[PM_LEVEL_INDEX(level, address)];
-               *page_size = PTE_LEVEL_PAGE_SIZE(level);
-       }
-
-       /*
-        * If we have a series of large PTEs, make
-        * sure to return a pointer to the first one.
-        */
-       if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
-               pte = first_pte_l7(pte, page_size, NULL);
-
-       return pte;
-}
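
The walk above is a plain radix-tree lookup: at each level it picks a 9-bit slice of the address as the table index. A self-contained illustration of that index computation follows; the helper below is ours, only the 9-bits-per-level layout is taken from the code above.

#include <stdio.h>

/* 9-bit table index of 'addr' at the given level; level 0 indexes 4 KiB pages. */
static unsigned long level_index(unsigned long addr, int level)
{
        return (addr >> (12 + 9 * level)) & 0x1ffUL;
}

int main(void)
{
        unsigned long addr = 0x12345678000UL;

        for (int level = 3; level >= 0; level--)
                printf("level %d index: %lu\n", level, level_index(addr, level));
        return 0;
}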
-
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
-{
-       unsigned long pt;
-       int mode;
-
-       while (cmpxchg64(pte, pteval, 0) != pteval) {
-               pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
-               pteval = *pte;
-       }
-
-       if (!IOMMU_PTE_PRESENT(pteval))
-               return freelist;
-
-       pt   = (unsigned long)IOMMU_PTE_PAGE(pteval);
-       mode = IOMMU_PTE_MODE(pteval);
-
-       return free_sub_pt(pt, mode, freelist);
-}
-
-/*
- * Generic mapping function. It maps a physical address into a DMA
- * address space and allocates the page-table pages if necessary.
- * In the future it can be extended to support all features of AMD IOMMU
- * page tables, like level skipping and full 64-bit address spaces.
- */
-static int iommu_map_page(struct protection_domain *dom,
-                         unsigned long bus_addr,
-                         unsigned long phys_addr,
-                         unsigned long page_size,
-                         int prot,
-                         gfp_t gfp)
-{
-       struct page *freelist = NULL;
-       bool updated = false;
-       u64 __pte, *pte;
-       int ret, i, count;
-
-       BUG_ON(!IS_ALIGNED(bus_addr, page_size));
-       BUG_ON(!IS_ALIGNED(phys_addr, page_size));
-
-       ret = -EINVAL;
-       if (!(prot & IOMMU_PROT_MASK))
-               goto out;
-
-       count = PAGE_SIZE_PTE_COUNT(page_size);
-       pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
-
-       ret = -ENOMEM;
-       if (!pte)
-               goto out;
-
-       for (i = 0; i < count; ++i)
-               freelist = free_clear_pte(&pte[i], pte[i], freelist);
-
-       if (freelist != NULL)
-               updated = true;
-
-       if (count > 1) {
-               __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
-               __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-       } else
-               __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-
-       if (prot & IOMMU_PROT_IR)
-               __pte |= IOMMU_PTE_IR;
-       if (prot & IOMMU_PROT_IW)
-               __pte |= IOMMU_PTE_IW;
-
-       for (i = 0; i < count; ++i)
-               pte[i] = __pte;
-
-       ret = 0;
-
-out:
-       if (updated) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dom->lock, flags);
-               /*
-                * Flush domain TLB(s) and wait for completion. Any Device Table
-                * updates and flushing already happened in
-                * increase_address_space().
-                */
-               domain_flush_tlb_pde(dom);
-               domain_flush_complete(dom);
-               spin_unlock_irqrestore(&dom->lock, flags);
-       }
-
-       /* Everything flushed out, free pages now */
-       free_page_list(freelist);
-
-       return ret;
-}
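
When the requested page size sits between two level boundaries, the mapping is written as a run of identical entries with the level-7 encoding, and 'count' above is the length of that run. A hedged sketch of the count computation, assuming the usual 4 KiB base page and 9 bits per level:

#include <stdio.h>

/*
 * Number of identical PTEs needed for a power-of-two IOMMU page size.
 * Sizes that match a level boundary (4K, 2M, 1G, ...) take a single entry;
 * in-between sizes are written as 2..256 replicated entries.
 */
static unsigned long pte_count(unsigned long page_size)
{
        unsigned long log2sz = __builtin_ctzl(page_size);

        return 1UL << ((log2sz - 12) % 9);
}

int main(void)
{
        unsigned long sizes[] = { 0x1000UL, 0x8000UL, 0x200000UL, 0x400000UL };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%#lx -> %lu PTE(s)\n", sizes[i], pte_count(sizes[i]));
        return 0;
}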
-
-static unsigned long iommu_unmap_page(struct protection_domain *dom,
-                                     unsigned long bus_addr,
-                                     unsigned long page_size)
-{
-       unsigned long long unmapped;
-       unsigned long unmap_size;
-       u64 *pte;
-
-       BUG_ON(!is_power_of_2(page_size));
-
-       unmapped = 0;
-
-       while (unmapped < page_size) {
-
-               pte = fetch_pte(dom, bus_addr, &unmap_size);
-
-               if (pte) {
-                       int i, count;
-
-                       count = PAGE_SIZE_PTE_COUNT(unmap_size);
-                       for (i = 0; i < count; i++)
-                               pte[i] = 0ULL;
-               }
-
-               bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
-               unmapped += unmap_size;
-       }
-
-       BUG_ON(unmapped && !is_power_of_2(unmapped));
-
-       return unmapped;
-}
-
-/****************************************************************************
- *
- * The next functions belong to the domain allocation. A domain is
- * allocated for every IOMMU as the default domain. If device isolation
- * is enabled, every device gets its own domain. The most important thing
- * about domains is the page table mapping the DMA address space they
- * contain.
- *
- ****************************************************************************/
-
-static u16 domain_id_alloc(void)
-{
-       int id;
-
-       spin_lock(&pd_bitmap_lock);
-       id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
-       BUG_ON(id == 0);
-       if (id > 0 && id < MAX_DOMAIN_ID)
-               __set_bit(id, amd_iommu_pd_alloc_bitmap);
-       else
-               id = 0;
-       spin_unlock(&pd_bitmap_lock);
-
-       return id;
-}
-
-static void domain_id_free(int id)
-{
-       spin_lock(&pd_bitmap_lock);
-       if (id > 0 && id < MAX_DOMAIN_ID)
-               __clear_bit(id, amd_iommu_pd_alloc_bitmap);
-       spin_unlock(&pd_bitmap_lock);
-}
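
Domain IDs are handed out by a simple bitmap allocator: find the first clear bit, set it, return its index, and clear it again on free, with ID 0 reserved as the error value. The user-space sketch below mirrors that idea with a plain array and no locking; it is illustrative only.

#include <stdio.h>

#define MAX_IDS 64

static unsigned char used[MAX_IDS] = { 1 };     /* ID 0 stays reserved */

static int id_alloc(void)
{
        for (int id = 1; id < MAX_IDS; id++) {
                if (!used[id]) {
                        used[id] = 1;
                        return id;
                }
        }
        return 0;       /* 0 doubles as "allocation failed" */
}

static void id_free(int id)
{
        if (id > 0 && id < MAX_IDS)
                used[id] = 0;
}

int main(void)
{
        int a = id_alloc(), b = id_alloc();

        printf("allocated %d and %d\n", a, b);
        id_free(a);
        printf("reallocated %d\n", id_alloc());
        return 0;
}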
-
-static void free_gcr3_tbl_level1(u64 *tbl)
-{
-       u64 *ptr;
-       int i;
-
-       for (i = 0; i < 512; ++i) {
-               if (!(tbl[i] & GCR3_VALID))
-                       continue;
-
-               ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
-
-               free_page((unsigned long)ptr);
-       }
-}
-
-static void free_gcr3_tbl_level2(u64 *tbl)
-{
-       u64 *ptr;
-       int i;
-
-       for (i = 0; i < 512; ++i) {
-               if (!(tbl[i] & GCR3_VALID))
-                       continue;
-
-               ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
-
-               free_gcr3_tbl_level1(ptr);
-       }
-}
-
-static void free_gcr3_table(struct protection_domain *domain)
-{
-       if (domain->glx == 2)
-               free_gcr3_tbl_level2(domain->gcr3_tbl);
-       else if (domain->glx == 1)
-               free_gcr3_tbl_level1(domain->gcr3_tbl);
-       else
-               BUG_ON(domain->glx != 0);
-
-       free_page((unsigned long)domain->gcr3_tbl);
-}
-
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
-                         struct domain_pgtable *pgtable,
-                         bool ats, bool ppr)
-{
-       u64 pte_root = 0;
-       u64 flags = 0;
-       u32 old_domid;
-
-       if (pgtable->mode != PAGE_MODE_NONE)
-               pte_root = iommu_virt_to_phys(pgtable->root);
-
-       pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
-                   << DEV_ENTRY_MODE_SHIFT;
-       pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
-
-       flags = amd_iommu_dev_table[devid].data[1];
-
-       if (ats)
-               flags |= DTE_FLAG_IOTLB;
-
-       if (ppr) {
-               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
-               if (iommu_feature(iommu, FEATURE_EPHSUP))
-                       pte_root |= 1ULL << DEV_ENTRY_PPR;
-       }
-
-       if (domain->flags & PD_IOMMUV2_MASK) {
-               u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
-               u64 glx  = domain->glx;
-               u64 tmp;
-
-               pte_root |= DTE_FLAG_GV;
-               pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
-
-               /* First mask out possible old values for GCR3 table */
-               tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
-               flags    &= ~tmp;
-
-               tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
-               flags    &= ~tmp;
-
-               /* Encode GCR3 table into DTE */
-               tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
-               pte_root |= tmp;
-
-               tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
-               flags    |= tmp;
-
-               tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
-               flags    |= tmp;
-       }
-
-       flags &= ~DEV_DOMID_MASK;
-       flags |= domain->id;
-
-       old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
-       amd_iommu_dev_table[devid].data[1]  = flags;
-       amd_iommu_dev_table[devid].data[0]  = pte_root;
-
-       /*
-        * A kdump kernel might be replacing a domain ID that was copied from
-        * the previous kernel. If so, it needs to flush the translation-cache
-        * entries for the old domain ID that is being overwritten.
-        */
-       if (old_domid) {
-               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
-               amd_iommu_flush_tlb_domid(iommu, old_domid);
-       }
-}
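
set_dte_entry() is essentially bitfield packing: independent values, including a GCR3 pointer split across three separate ranges, are masked into two 64-bit words. The generic helper below shows the clear-then-or pattern; the field positions in the example are made up and do not reflect the real DTE layout.

#include <stdio.h>
#include <stdint.h>

/* Pack 'val' into 'word' at bits [shift, shift + width) after clearing them. */
static uint64_t set_field(uint64_t word, unsigned int shift, unsigned int width,
                          uint64_t val)
{
        uint64_t mask = ((1ULL << width) - 1) << shift;

        return (word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint64_t dte = 0;

        dte = set_field(dte, 0, 9, 3);          /* e.g. a paging mode */
        dte = set_field(dte, 9, 16, 0x1234);    /* e.g. a domain ID */
        printf("dte = %#llx\n", (unsigned long long)dte);
        return 0;
}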
-
-static void clear_dte_entry(u16 devid)
-{
-       /* remove entry from the device table seen by the hardware */
-       amd_iommu_dev_table[devid].data[0]  = DTE_FLAG_V | DTE_FLAG_TV;
-       amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
-
-       amd_iommu_apply_erratum_63(devid);
-}
-
-static void do_attach(struct iommu_dev_data *dev_data,
-                     struct protection_domain *domain)
-{
-       struct domain_pgtable pgtable;
-       struct amd_iommu *iommu;
-       bool ats;
-
-       iommu = amd_iommu_rlookup_table[dev_data->devid];
-       ats   = dev_data->ats.enabled;
-
-       /* Update data structures */
-       dev_data->domain = domain;
-       list_add(&dev_data->list, &domain->dev_list);
-
-       /* Do reference counting */
-       domain->dev_iommu[iommu->index] += 1;
-       domain->dev_cnt                 += 1;
-
-       /* Update device table */
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       set_dte_entry(dev_data->devid, domain, &pgtable,
-                     ats, dev_data->iommu_v2);
-       clone_aliases(dev_data->pdev);
-
-       device_flush_dte(dev_data);
-}
-
-static void do_detach(struct iommu_dev_data *dev_data)
-{
-       struct protection_domain *domain = dev_data->domain;
-       struct amd_iommu *iommu;
-
-       iommu = amd_iommu_rlookup_table[dev_data->devid];
-
-       /* Update data structures */
-       dev_data->domain = NULL;
-       list_del(&dev_data->list);
-       clear_dte_entry(dev_data->devid);
-       clone_aliases(dev_data->pdev);
-
-       /* Flush the DTE entry */
-       device_flush_dte(dev_data);
-
-       /* Flush IOTLB */
-       domain_flush_tlb_pde(domain);
-
-       /* Wait for the flushes to finish */
-       domain_flush_complete(domain);
-
-       /* decrease reference counters - needs to happen after the flushes */
-       domain->dev_iommu[iommu->index] -= 1;
-       domain->dev_cnt                 -= 1;
-}
-
-static void pdev_iommuv2_disable(struct pci_dev *pdev)
-{
-       pci_disable_ats(pdev);
-       pci_disable_pri(pdev);
-       pci_disable_pasid(pdev);
-}
-
-/* FIXME: Change generic reset-function to do the same */
-static int pri_reset_while_enabled(struct pci_dev *pdev)
-{
-       u16 control;
-       int pos;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (!pos)
-               return -EINVAL;
-
-       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
-       control |= PCI_PRI_CTRL_RESET;
-       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
-
-       return 0;
-}
-
-static int pdev_iommuv2_enable(struct pci_dev *pdev)
-{
-       bool reset_enable;
-       int reqs, ret;
-
-       /* FIXME: Hardcode number of outstanding requests for now */
-       reqs = 32;
-       if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
-               reqs = 1;
-       reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
-
-       /* Only allow access to user-accessible pages */
-       ret = pci_enable_pasid(pdev, 0);
-       if (ret)
-               goto out_err;
-
-       /* First reset the PRI state of the device */
-       ret = pci_reset_pri(pdev);
-       if (ret)
-               goto out_err;
-
-       /* Enable PRI */
-       ret = pci_enable_pri(pdev, reqs);
-       if (ret)
-               goto out_err;
-
-       if (reset_enable) {
-               ret = pri_reset_while_enabled(pdev);
-               if (ret)
-                       goto out_err;
-       }
-
-       ret = pci_enable_ats(pdev, PAGE_SHIFT);
-       if (ret)
-               goto out_err;
-
-       return 0;
-
-out_err:
-       pci_disable_pri(pdev);
-       pci_disable_pasid(pdev);
-
-       return ret;
-}
-
-/*
- * If a device is not yet associated with a domain, this function makes the
- * device visible in the domain.
- */
-static int attach_device(struct device *dev,
-                        struct protection_domain *domain)
-{
-       struct iommu_dev_data *dev_data;
-       struct pci_dev *pdev;
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       dev_data = dev_iommu_priv_get(dev);
-
-       spin_lock(&dev_data->lock);
-
-       ret = -EBUSY;
-       if (dev_data->domain != NULL)
-               goto out;
-
-       if (!dev_is_pci(dev))
-               goto skip_ats_check;
-
-       pdev = to_pci_dev(dev);
-       if (domain->flags & PD_IOMMUV2_MASK) {
-               struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
-
-               ret = -EINVAL;
-               if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
-                       goto out;
-
-               if (dev_data->iommu_v2) {
-                       if (pdev_iommuv2_enable(pdev) != 0)
-                               goto out;
-
-                       dev_data->ats.enabled = true;
-                       dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-                       dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
-               }
-       } else if (amd_iommu_iotlb_sup &&
-                  pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
-               dev_data->ats.enabled = true;
-               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-       }
-
-skip_ats_check:
-       ret = 0;
-
-       do_attach(dev_data, domain);
-
-       /*
-        * We might boot into a crash kernel here. The crashed kernel may
-        * have left the IOMMU caches dirty, so flush here to evict all
-        * stale entries.
-        */
-       domain_flush_tlb_pde(domain);
-
-       domain_flush_complete(domain);
-
-out:
-       spin_unlock(&dev_data->lock);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
-
-/*
- * Removes a device from a protection domain (with devtable_lock held)
- */
-static void detach_device(struct device *dev)
-{
-       struct protection_domain *domain;
-       struct iommu_dev_data *dev_data;
-       unsigned long flags;
-
-       dev_data = dev_iommu_priv_get(dev);
-       domain   = dev_data->domain;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       spin_lock(&dev_data->lock);
-
-       /*
-        * First check if the device is still attached. It might already
-        * be detached from its domain because the generic
-        * iommu_detach_group code detached it and we try again here in
-        * our alias handling.
-        */
-       if (WARN_ON(!dev_data->domain))
-               goto out;
-
-       do_detach(dev_data);
-
-       if (!dev_is_pci(dev))
-               goto out;
-
-       if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
-               pdev_iommuv2_disable(to_pci_dev(dev));
-       else if (dev_data->ats.enabled)
-               pci_disable_ats(to_pci_dev(dev));
-
-       dev_data->ats.enabled = false;
-
-out:
-       spin_unlock(&dev_data->lock);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static struct iommu_device *amd_iommu_probe_device(struct device *dev)
-{
-       struct iommu_device *iommu_dev;
-       struct amd_iommu *iommu;
-       int ret, devid;
-
-       if (!check_device(dev))
-               return ERR_PTR(-ENODEV);
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return ERR_PTR(devid);
-
-       iommu = amd_iommu_rlookup_table[devid];
-
-       if (dev_iommu_priv_get(dev))
-               return &iommu->iommu;
-
-       ret = iommu_init_device(dev);
-       if (ret) {
-               if (ret != -ENOTSUPP)
-                       dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
-               iommu_dev = ERR_PTR(ret);
-               iommu_ignore_device(dev);
-       } else {
-               iommu_dev = &iommu->iommu;
-       }
-
-       iommu_completion_wait(iommu);
-
-       return iommu_dev;
-}
-
-static void amd_iommu_probe_finalize(struct device *dev)
-{
-       struct iommu_domain *domain;
-
-       /* Domains are initialized for this device - have a look at what we ended up with */
-       domain = iommu_get_domain_for_dev(dev);
-       if (domain->type == IOMMU_DOMAIN_DMA)
-               iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
-}
-
-static void amd_iommu_release_device(struct device *dev)
-{
-       int devid = get_device_id(dev);
-       struct amd_iommu *iommu;
-
-       if (!check_device(dev))
-               return;
-
-       iommu = amd_iommu_rlookup_table[devid];
-
-       amd_iommu_uninit_device(dev);
-       iommu_completion_wait(iommu);
-}
-
-static struct iommu_group *amd_iommu_device_group(struct device *dev)
-{
-       if (dev_is_pci(dev))
-               return pci_device_group(dev);
-
-       return acpihid_device_group(dev);
-}
-
-static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
-               enum iommu_attr attr, void *data)
-{
-       switch (domain->type) {
-       case IOMMU_DOMAIN_UNMANAGED:
-               return -ENODEV;
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       *(int *)data = !amd_iommu_unmap_flush;
-                       return 0;
-               default:
-                       return -ENODEV;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-}
-
-/*****************************************************************************
- *
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-static void update_device_table(struct protection_domain *domain,
-                               struct domain_pgtable *pgtable)
-{
-       struct iommu_dev_data *dev_data;
-
-       list_for_each_entry(dev_data, &domain->dev_list, list) {
-               set_dte_entry(dev_data->devid, domain, pgtable,
-                             dev_data->ats.enabled, dev_data->iommu_v2);
-               clone_aliases(dev_data->pdev);
-       }
-}
-
-static void update_and_flush_device_table(struct protection_domain *domain,
-                                         struct domain_pgtable *pgtable)
-{
-       update_device_table(domain, pgtable);
-       domain_flush_devices(domain);
-}
-
-static void update_domain(struct protection_domain *domain)
-{
-       struct domain_pgtable pgtable;
-
-       /* Update device table */
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       update_and_flush_device_table(domain, &pgtable);
-
-       /* Flush domain TLB(s) and wait for completion */
-       domain_flush_tlb_pde(domain);
-       domain_flush_complete(domain);
-}
-
-int __init amd_iommu_init_api(void)
-{
-       int ret, err = 0;
-
-       ret = iova_cache_get();
-       if (ret)
-               return ret;
-
-       err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
-       if (err)
-               return err;
-#ifdef CONFIG_ARM_AMBA
-       err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
-       if (err)
-               return err;
-#endif
-       err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-int __init amd_iommu_init_dma_ops(void)
-{
-       swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-
-       if (amd_iommu_unmap_flush)
-               pr_info("IO/TLB flush on unmap enabled\n");
-       else
-               pr_info("Lazy IO/TLB flushing enabled\n");
-
-       return 0;
-}
-
-/*****************************************************************************
- *
- * The following functions belong to the exported interface of AMD IOMMU
- *
- * This interface allows access to lower level functions of the IOMMU
- * like protection domain handling and assignment of devices to domains
- * which is not possible with the dma_ops interface.
- *
- *****************************************************************************/
-
-static void cleanup_domain(struct protection_domain *domain)
-{
-       struct iommu_dev_data *entry;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       while (!list_empty(&domain->dev_list)) {
-               entry = list_first_entry(&domain->dev_list,
-                                        struct iommu_dev_data, list);
-               BUG_ON(!entry->domain);
-               do_detach(entry);
-       }
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void protection_domain_free(struct protection_domain *domain)
-{
-       struct domain_pgtable pgtable;
-
-       if (!domain)
-               return;
-
-       if (domain->id)
-               domain_id_free(domain->id);
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       atomic64_set(&domain->pt_root, 0);
-       free_pagetable(&pgtable);
-
-       kfree(domain);
-}
-
-static int protection_domain_init(struct protection_domain *domain, int mode)
-{
-       u64 *pt_root = NULL, root;
-
-       BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
-       spin_lock_init(&domain->lock);
-       domain->id = domain_id_alloc();
-       if (!domain->id)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&domain->dev_list);
-
-       if (mode != PAGE_MODE_NONE) {
-               pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-               if (!pt_root)
-                       return -ENOMEM;
-       }
-
-       root = amd_iommu_domain_encode_pgtable(pt_root, mode);
-       atomic64_set(&domain->pt_root, root);
-
-       return 0;
-}
-
-static struct protection_domain *protection_domain_alloc(int mode)
-{
-       struct protection_domain *domain;
-
-       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-       if (!domain)
-               return NULL;
-
-       if (protection_domain_init(domain, mode))
-               goto out_err;
-
-       return domain;
-
-out_err:
-       kfree(domain);
-
-       return NULL;
-}
-
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
-{
-       struct protection_domain *domain;
-       int mode = DEFAULT_PGTABLE_LEVEL;
-
-       if (type == IOMMU_DOMAIN_IDENTITY)
-               mode = PAGE_MODE_NONE;
-
-       domain = protection_domain_alloc(mode);
-       if (!domain)
-               return NULL;
-
-       domain->domain.geometry.aperture_start = 0;
-       domain->domain.geometry.aperture_end   = ~0ULL;
-       domain->domain.geometry.force_aperture = true;
-
-       if (type == IOMMU_DOMAIN_DMA &&
-           iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
-               goto free_domain;
-
-       return &domain->domain;
-
-free_domain:
-       protection_domain_free(domain);
-
-       return NULL;
-}
-
-static void amd_iommu_domain_free(struct iommu_domain *dom)
-{
-       struct protection_domain *domain;
-
-       if (!dom)
-               return;
-
-       domain = to_pdomain(dom);
-
-       if (domain->dev_cnt > 0)
-               cleanup_domain(domain);
-
-       BUG_ON(domain->dev_cnt != 0);
-
-       if (dom->type == IOMMU_DOMAIN_DMA)
-               iommu_put_dma_cookie(&domain->domain);
-
-       if (domain->flags & PD_IOMMUV2_MASK)
-               free_gcr3_table(domain);
-
-       protection_domain_free(domain);
-}
-
-static void amd_iommu_detach_device(struct iommu_domain *dom,
-                                   struct device *dev)
-{
-       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-       struct amd_iommu *iommu;
-       int devid;
-
-       if (!check_device(dev))
-               return;
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return;
-
-       if (dev_data->domain != NULL)
-               detach_device(dev);
-
-       iommu = amd_iommu_rlookup_table[devid];
-       if (!iommu)
-               return;
-
-#ifdef CONFIG_IRQ_REMAP
-       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-           (dom->type == IOMMU_DOMAIN_UNMANAGED))
-               dev_data->use_vapic = 0;
-#endif
-
-       iommu_completion_wait(iommu);
-}
-
-static int amd_iommu_attach_device(struct iommu_domain *dom,
-                                  struct device *dev)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       struct iommu_dev_data *dev_data;
-       struct amd_iommu *iommu;
-       int ret;
-
-       if (!check_device(dev))
-               return -EINVAL;
-
-       dev_data = dev_iommu_priv_get(dev);
-       dev_data->defer_attach = false;
-
-       iommu = amd_iommu_rlookup_table[dev_data->devid];
-       if (!iommu)
-               return -EINVAL;
-
-       if (dev_data->domain)
-               detach_device(dev);
-
-       ret = attach_device(dev, domain);
-
-#ifdef CONFIG_IRQ_REMAP
-       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
-               if (dom->type == IOMMU_DOMAIN_UNMANAGED)
-                       dev_data->use_vapic = 1;
-               else
-                       dev_data->use_vapic = 0;
-       }
-#endif
-
-       iommu_completion_wait(iommu);
-
-       return ret;
-}
-
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-                        phys_addr_t paddr, size_t page_size, int iommu_prot,
-                        gfp_t gfp)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       struct domain_pgtable pgtable;
-       int prot = 0;
-       int ret;
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       if (pgtable.mode == PAGE_MODE_NONE)
-               return -EINVAL;
-
-       if (iommu_prot & IOMMU_READ)
-               prot |= IOMMU_PROT_IR;
-       if (iommu_prot & IOMMU_WRITE)
-               prot |= IOMMU_PROT_IW;
-
-       ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
-
-       domain_flush_np_cache(domain, iova, page_size);
-
-       return ret;
-}
-
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-                             size_t page_size,
-                             struct iommu_iotlb_gather *gather)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       struct domain_pgtable pgtable;
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       if (pgtable.mode == PAGE_MODE_NONE)
-               return 0;
-
-       return iommu_unmap_page(domain, iova, page_size);
-}
-
-static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
-                                         dma_addr_t iova)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long offset_mask, pte_pgsize;
-       struct domain_pgtable pgtable;
-       u64 *pte, __pte;
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       if (pgtable.mode == PAGE_MODE_NONE)
-               return iova;
-
-       pte = fetch_pte(domain, iova, &pte_pgsize);
-
-       if (!pte || !IOMMU_PTE_PRESENT(*pte))
-               return 0;
-
-       offset_mask = pte_pgsize - 1;
-       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
-
-       return (__pte & ~offset_mask) | (iova & offset_mask);
-}
-
-static bool amd_iommu_capable(enum iommu_cap cap)
-{
-       switch (cap) {
-       case IOMMU_CAP_CACHE_COHERENCY:
-               return true;
-       case IOMMU_CAP_INTR_REMAP:
-               return (irq_remapping_enabled == 1);
-       case IOMMU_CAP_NOEXEC:
-               return false;
-       default:
-               break;
-       }
-
-       return false;
-}
-
-static void amd_iommu_get_resv_regions(struct device *dev,
-                                      struct list_head *head)
-{
-       struct iommu_resv_region *region;
-       struct unity_map_entry *entry;
-       int devid;
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return;
-
-       list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-               int type, prot = 0;
-               size_t length;
-
-               if (devid < entry->devid_start || devid > entry->devid_end)
-                       continue;
-
-               type   = IOMMU_RESV_DIRECT;
-               length = entry->address_end - entry->address_start;
-               if (entry->prot & IOMMU_PROT_IR)
-                       prot |= IOMMU_READ;
-               if (entry->prot & IOMMU_PROT_IW)
-                       prot |= IOMMU_WRITE;
-               if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
-                       /* Exclusion range */
-                       type = IOMMU_RESV_RESERVED;
-
-               region = iommu_alloc_resv_region(entry->address_start,
-                                                length, prot, type);
-               if (!region) {
-                       dev_err(dev, "Out of memory allocating dm-regions\n");
-                       return;
-               }
-               list_add_tail(&region->list, head);
-       }
-
-       region = iommu_alloc_resv_region(MSI_RANGE_START,
-                                        MSI_RANGE_END - MSI_RANGE_START + 1,
-                                        0, IOMMU_RESV_MSI);
-       if (!region)
-               return;
-       list_add_tail(&region->list, head);
-
-       region = iommu_alloc_resv_region(HT_RANGE_START,
-                                        HT_RANGE_END - HT_RANGE_START + 1,
-                                        0, IOMMU_RESV_RESERVED);
-       if (!region)
-               return;
-       list_add_tail(&region->list, head);
-}
-
-bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-                                 struct device *dev)
-{
-       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-
-       return dev_data->defer_attach;
-}
-EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
-
-static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
-{
-       struct protection_domain *dom = to_pdomain(domain);
-       unsigned long flags;
-
-       spin_lock_irqsave(&dom->lock, flags);
-       domain_flush_tlb_pde(dom);
-       domain_flush_complete(dom);
-       spin_unlock_irqrestore(&dom->lock, flags);
-}
-
-static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
-                                struct iommu_iotlb_gather *gather)
-{
-       amd_iommu_flush_iotlb_all(domain);
-}
-
-static int amd_iommu_def_domain_type(struct device *dev)
-{
-       struct iommu_dev_data *dev_data;
-
-       dev_data = dev_iommu_priv_get(dev);
-       if (!dev_data)
-               return 0;
-
-       if (dev_data->iommu_v2)
-               return IOMMU_DOMAIN_IDENTITY;
-
-       return 0;
-}
-
-const struct iommu_ops amd_iommu_ops = {
-       .capable = amd_iommu_capable,
-       .domain_alloc = amd_iommu_domain_alloc,
-       .domain_free  = amd_iommu_domain_free,
-       .attach_dev = amd_iommu_attach_device,
-       .detach_dev = amd_iommu_detach_device,
-       .map = amd_iommu_map,
-       .unmap = amd_iommu_unmap,
-       .iova_to_phys = amd_iommu_iova_to_phys,
-       .probe_device = amd_iommu_probe_device,
-       .release_device = amd_iommu_release_device,
-       .probe_finalize = amd_iommu_probe_finalize,
-       .device_group = amd_iommu_device_group,
-       .domain_get_attr = amd_iommu_domain_get_attr,
-       .get_resv_regions = amd_iommu_get_resv_regions,
-       .put_resv_regions = generic_iommu_put_resv_regions,
-       .is_attach_deferred = amd_iommu_is_attach_deferred,
-       .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
-       .flush_iotlb_all = amd_iommu_flush_iotlb_all,
-       .iotlb_sync = amd_iommu_iotlb_sync,
-       .def_domain_type = amd_iommu_def_domain_type,
-};
-
-/*****************************************************************************
- *
- * The next functions do a basic initialization of the IOMMU for
- * passthrough mode.
- *
- * In passthrough mode the IOMMU is initialized and enabled but not used for
- * DMA-API translation.
- *
- *****************************************************************************/
-
-/* IOMMUv2 specific functions */
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
-
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
-
-void amd_iommu_domain_direct_map(struct iommu_domain *dom)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       struct domain_pgtable pgtable;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       /* First save pgtable configuration */
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-
-       /* Update data structure */
-       atomic64_set(&domain->pt_root, 0);
-
-       /* Make changes visible to IOMMUs */
-       update_domain(domain);
-
-       /* Page-table is not visible to IOMMU anymore, so free it */
-       free_pagetable(&pgtable);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-}
-EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-       int levels, ret;
-
-       if (pasids <= 0 || pasids > (PASID_MASK + 1))
-               return -EINVAL;
-
-       /* Number of GCR3 table levels required */
-       for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
-               levels += 1;
-
-       if (levels > amd_iommu_max_glx_val)
-               return -EINVAL;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       /*
-        * Save ourselves all the sanity checks of whether the devices
-        * already in the domain support IOMMUv2. Just require that the
-        * domain has no devices attached when it is switched into
-        * IOMMUv2 mode.
-        */
-       ret = -EBUSY;
-       if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
-               goto out;
-
-       ret = -ENOMEM;
-       domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-       if (domain->gcr3_tbl == NULL)
-               goto out;
-
-       domain->glx      = levels;
-       domain->flags   |= PD_IOMMUV2_MASK;
-
-       update_domain(domain);
-
-       ret = 0;
-
-out:
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
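
Each GCR3 table level resolves 9 bits of the PASID, so the loop in amd_iommu_domain_enable_v2() simply counts how many 9-bit chunks the largest PASID needs. The standalone check below reproduces that loop shape with a few sample values.

#include <stdio.h>

/* Count the extra 9-bit levels needed to cover 'pasids' PASIDs. */
static int gcr3_levels(int pasids)
{
        int levels = 0;

        for (; (pasids - 1) & ~0x1ff; pasids >>= 9)
                levels += 1;
        return levels;
}

int main(void)
{
        printf("512 PASIDs   -> %d extra level(s)\n", gcr3_levels(512));    /* 0 */
        printf("513 PASIDs   -> %d extra level(s)\n", gcr3_levels(513));    /* 1 */
        printf("65536 PASIDs -> %d extra level(s)\n", gcr3_levels(65536));  /* 1 */
        return 0;
}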
-
-static int __flush_pasid(struct protection_domain *domain, int pasid,
-                        u64 address, bool size)
-{
-       struct iommu_dev_data *dev_data;
-       struct iommu_cmd cmd;
-       int i, ret;
-
-       if (!(domain->flags & PD_IOMMUV2_MASK))
-               return -EINVAL;
-
-       build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
-
-       /*
-        * The IOMMU TLB needs to be flushed before the device TLB to
-        * prevent a device-TLB refill from the IOMMU TLB.
-        */
-       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-               if (domain->dev_iommu[i] == 0)
-                       continue;
-
-               ret = iommu_queue_command(amd_iommus[i], &cmd);
-               if (ret != 0)
-                       goto out;
-       }
-
-       /* Wait until IOMMU TLB flushes are complete */
-       domain_flush_complete(domain);
-
-       /* Now flush device TLBs */
-       list_for_each_entry(dev_data, &domain->dev_list, list) {
-               struct amd_iommu *iommu;
-               int qdep;
-
-               /*
-                * There might be non-IOMMUv2 capable devices in an IOMMUv2
-                * domain.
-                */
-               if (!dev_data->ats.enabled)
-                       continue;
-
-               qdep  = dev_data->ats.qdep;
-               iommu = amd_iommu_rlookup_table[dev_data->devid];
-
-               build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
-                                     qdep, address, size);
-
-               ret = iommu_queue_command(iommu, &cmd);
-               if (ret != 0)
-                       goto out;
-       }
-
-       /* Wait until all device TLBs are flushed */
-       domain_flush_complete(domain);
-
-       ret = 0;
-
-out:
-
-       return ret;
-}
-
-static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
-                                 u64 address)
-{
-       return __flush_pasid(domain, pasid, address, false);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
-                        u64 address)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&domain->lock, flags);
-       ret = __amd_iommu_flush_page(domain, pasid, address);
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_flush_page);
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
-{
-       return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                            true);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&domain->lock, flags);
-       ret = __amd_iommu_flush_tlb(domain, pasid);
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_flush_tlb);
-
-static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
-{
-       int index;
-       u64 *pte;
-
-       while (true) {
-
-               index = (pasid >> (9 * level)) & 0x1ff;
-               pte   = &root[index];
-
-               if (level == 0)
-                       break;
-
-               if (!(*pte & GCR3_VALID)) {
-                       if (!alloc)
-                               return NULL;
-
-                       root = (void *)get_zeroed_page(GFP_ATOMIC);
-                       if (root == NULL)
-                               return NULL;
-
-                       *pte = iommu_virt_to_phys(root) | GCR3_VALID;
-               }
-
-               root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
-               level -= 1;
-       }
-
-       return pte;
-}
-
-static int __set_gcr3(struct protection_domain *domain, int pasid,
-                     unsigned long cr3)
-{
-       struct domain_pgtable pgtable;
-       u64 *pte;
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       if (pgtable.mode != PAGE_MODE_NONE)
-               return -EINVAL;
-
-       pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
-       if (pte == NULL)
-               return -ENOMEM;
-
-       *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
-       return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, int pasid)
-{
-       struct domain_pgtable pgtable;
-       u64 *pte;
-
-       amd_iommu_domain_get_pgtable(domain, &pgtable);
-       if (pgtable.mode != PAGE_MODE_NONE)
-               return -EINVAL;
-
-       pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
-       if (pte == NULL)
-               return 0;
-
-       *pte = 0;
-
-       return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
-                             unsigned long cr3)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&domain->lock, flags);
-       ret = __set_gcr3(domain, pasid, cr3);
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&domain->lock, flags);
-       ret = __clear_gcr3(domain, pasid);
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
-
-int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
-                          int status, int tag)
-{
-       struct iommu_dev_data *dev_data;
-       struct amd_iommu *iommu;
-       struct iommu_cmd cmd;
-
-       dev_data = dev_iommu_priv_get(&pdev->dev);
-       iommu    = amd_iommu_rlookup_table[dev_data->devid];
-
-       build_complete_ppr(&cmd, dev_data->devid, pasid, status,
-                          tag, dev_data->pri_tlp);
-
-       return iommu_queue_command(iommu, &cmd);
-}
-EXPORT_SYMBOL(amd_iommu_complete_ppr);
-
-struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
-{
-       struct protection_domain *pdomain;
-       struct iommu_dev_data *dev_data;
-       struct device *dev = &pdev->dev;
-       struct iommu_domain *io_domain;
-
-       if (!check_device(dev))
-               return NULL;
-
-       dev_data  = dev_iommu_priv_get(&pdev->dev);
-       pdomain   = dev_data->domain;
-       io_domain = iommu_get_domain_for_dev(dev);
-
-       if (pdomain == NULL && dev_data->defer_attach) {
-               dev_data->defer_attach = false;
-               pdomain = to_pdomain(io_domain);
-               attach_device(dev, pdomain);
-       }
-
-       if (pdomain == NULL)
-               return NULL;
-
-       if (io_domain->type != IOMMU_DOMAIN_DMA)
-               return NULL;
-
-       /* Only return IOMMUv2 domains */
-       if (!(pdomain->flags & PD_IOMMUV2_MASK))
-               return NULL;
-
-       return &pdomain->domain;
-}
-EXPORT_SYMBOL(amd_iommu_get_v2_domain);
-
-void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
-{
-       struct iommu_dev_data *dev_data;
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       dev_data = dev_iommu_priv_get(&pdev->dev);
-       dev_data->errata |= (1 << erratum);
-}
-EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
-
-int amd_iommu_device_info(struct pci_dev *pdev,
-                          struct amd_iommu_device_info *info)
-{
-       int max_pasids;
-       int pos;
-
-       if (pdev == NULL || info == NULL)
-               return -EINVAL;
-
-       if (!amd_iommu_v2_supported())
-               return -EINVAL;
-
-       memset(info, 0, sizeof(*info));
-
-       if (pci_ats_supported(pdev))
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (pos)
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (pos) {
-               int features;
-
-               max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
-               max_pasids = min(max_pasids, (1 << 20));
-
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
-               info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
-
-               features = pci_pasid_features(pdev);
-               if (features & PCI_PASID_CAP_EXEC)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
-               if (features & PCI_PASID_CAP_PRIV)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_device_info);
-
-#ifdef CONFIG_IRQ_REMAP
-
-/*****************************************************************************
- *
- * Interrupt Remapping Implementation
- *
- *****************************************************************************/
-
-static struct irq_chip amd_ir_chip;
-static DEFINE_SPINLOCK(iommu_table_lock);
-
-static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
-{
-       u64 dte;
-
-       dte     = amd_iommu_dev_table[devid].data[2];
-       dte     &= ~DTE_IRQ_PHYS_ADDR_MASK;
-       dte     |= iommu_virt_to_phys(table->table);
-       dte     |= DTE_IRQ_REMAP_INTCTL;
-       dte     |= DTE_IRQ_TABLE_LEN;
-       dte     |= DTE_IRQ_REMAP_ENABLE;
-
-       amd_iommu_dev_table[devid].data[2] = dte;
-}
-
-static struct irq_remap_table *get_irq_table(u16 devid)
-{
-       struct irq_remap_table *table;
-
-       if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
-                     "%s: no iommu for devid %x\n", __func__, devid))
-               return NULL;
-
-       table = irq_lookup_table[devid];
-       if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
-               return NULL;
-
-       return table;
-}
-
-static struct irq_remap_table *__alloc_irq_table(void)
-{
-       struct irq_remap_table *table;
-
-       table = kzalloc(sizeof(*table), GFP_KERNEL);
-       if (!table)
-               return NULL;
-
-       table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
-       if (!table->table) {
-               kfree(table);
-               return NULL;
-       }
-       raw_spin_lock_init(&table->lock);
-
-       if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-               memset(table->table, 0,
-                      MAX_IRQS_PER_TABLE * sizeof(u32));
-       else
-               memset(table->table, 0,
-                      (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
-       return table;
-}
-
-static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
-                                 struct irq_remap_table *table)
-{
-       irq_lookup_table[devid] = table;
-       set_dte_irq_entry(devid, table);
-       iommu_flush_dte(iommu, devid);
-}
-
-static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
-                                      void *data)
-{
-       struct irq_remap_table *table = data;
-
-       irq_lookup_table[alias] = table;
-       set_dte_irq_entry(alias, table);
-
-       iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
-
-       return 0;
-}
-
-static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
-{
-       struct irq_remap_table *table = NULL;
-       struct irq_remap_table *new_table = NULL;
-       struct amd_iommu *iommu;
-       unsigned long flags;
-       u16 alias;
-
-       spin_lock_irqsave(&iommu_table_lock, flags);
-
-       iommu = amd_iommu_rlookup_table[devid];
-       if (!iommu)
-               goto out_unlock;
-
-       table = irq_lookup_table[devid];
-       if (table)
-               goto out_unlock;
-
-       alias = amd_iommu_alias_table[devid];
-       table = irq_lookup_table[alias];
-       if (table) {
-               set_remap_table_entry(iommu, devid, table);
-               goto out_wait;
-       }
-       spin_unlock_irqrestore(&iommu_table_lock, flags);
-
-       /* Nothing there yet, allocate a new IRQ remapping table */
-       new_table = __alloc_irq_table();
-       if (!new_table)
-               return NULL;
-
-       spin_lock_irqsave(&iommu_table_lock, flags);
-
-       table = irq_lookup_table[devid];
-       if (table)
-               goto out_unlock;
-
-       table = irq_lookup_table[alias];
-       if (table) {
-               set_remap_table_entry(iommu, devid, table);
-               goto out_wait;
-       }
-
-       table = new_table;
-       new_table = NULL;
-
-       if (pdev)
-               pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
-                                      table);
-       else
-               set_remap_table_entry(iommu, devid, table);
-
-       if (devid != alias)
-               set_remap_table_entry(iommu, alias, table);
-
-out_wait:
-       iommu_completion_wait(iommu);
-
-out_unlock:
-       spin_unlock_irqrestore(&iommu_table_lock, flags);
-
-       if (new_table) {
-               kmem_cache_free(amd_iommu_irq_cache, new_table->table);
-               kfree(new_table);
-       }
-       return table;
-}
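
alloc_irq_table() uses the classic "drop the lock, allocate, re-take the lock, re-check" pattern so the allocation itself never happens under the spinlock; if another path installed a table in the meantime, the freshly allocated one is discarded. A compact user-space sketch of the same pattern with a pthread mutex (names and sizes are arbitrary):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *installed;                 /* shared slot, protected by 'lock' */

static void *get_or_create(void)
{
        void *new_obj, *obj;

        pthread_mutex_lock(&lock);
        obj = installed;
        pthread_mutex_unlock(&lock);
        if (obj)
                return obj;

        /* Allocate without holding the lock. */
        new_obj = malloc(128);

        pthread_mutex_lock(&lock);
        if (!installed) {               /* still empty: publish our object */
                installed = new_obj;
                new_obj = NULL;
        }
        obj = installed;
        pthread_mutex_unlock(&lock);

        free(new_obj);                  /* lost the race: discard our copy */
        return obj;
}

int main(void)
{
        return get_or_create() ? 0 : 1;
}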
-
-static int alloc_irq_index(u16 devid, int count, bool align,
-                          struct pci_dev *pdev)
-{
-       struct irq_remap_table *table;
-       int index, c, alignment = 1;
-       unsigned long flags;
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
-       if (!iommu)
-               return -ENODEV;
-
-       table = alloc_irq_table(devid, pdev);
-       if (!table)
-               return -ENODEV;
-
-       if (align)
-               alignment = roundup_pow_of_two(count);
-
-       raw_spin_lock_irqsave(&table->lock, flags);
-
-       /* Scan table for free entries */
-       for (index = ALIGN(table->min_index, alignment), c = 0;
-            index < MAX_IRQS_PER_TABLE;) {
-               if (!iommu->irte_ops->is_allocated(table, index)) {
-                       c += 1;
-               } else {
-                       c     = 0;
-                       index = ALIGN(index + 1, alignment);
-                       continue;
-               }
-
-               if (c == count) {
-                       for (; c != 0; --c)
-                               iommu->irte_ops->set_allocated(table, index - c + 1);
-
-                       index -= count - 1;
-                       goto out;
-               }
-
-               index++;
-       }
-
-       index = -ENOSPC;
-
-out:
-       raw_spin_unlock_irqrestore(&table->lock, flags);
-
-       return index;
-}
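
The scan above looks for 'count' consecutive free slots whose start index is aligned when alignment is requested, and on hitting an allocated slot it restarts at the next aligned position rather than advancing one entry at a time. The standalone sketch below runs the same scan over a boolean array instead of the driver's IRTE accessors.

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 32

/* Find 'count' consecutive free slots starting at a multiple of 'alignment'. */
static int find_free_range(const bool *allocated, int count, int alignment)
{
        int c = 0;

        for (int index = 0; index < TABLE_SIZE; ) {
                if (!allocated[index]) {
                        c += 1;
                } else {
                        c = 0;
                        /* Restart at the next aligned slot after this one. */
                        index = (index + alignment) / alignment * alignment;
                        continue;
                }
                if (c == count)
                        return index - count + 1;
                index++;
        }
        return -1;
}

int main(void)
{
        bool allocated[TABLE_SIZE] = { [0] = true, [5] = true };

        /* Slots 8..11 are the first free, 4-aligned run of length 4. */
        printf("found at index %d\n", find_free_range(allocated, 4, 4));
        return 0;
}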
-
-static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
-                         struct amd_ir_data *data)
-{
-       struct irq_remap_table *table;
-       struct amd_iommu *iommu;
-       unsigned long flags;
-       struct irte_ga *entry;
-
-       iommu = amd_iommu_rlookup_table[devid];
-       if (iommu == NULL)
-               return -EINVAL;
-
-       table = get_irq_table(devid);
-       if (!table)
-               return -ENOMEM;
-
-       raw_spin_lock_irqsave(&table->lock, flags);
-
-       entry = (struct irte_ga *)table->table;
-       entry = &entry[index];
-       entry->lo.fields_remap.valid = 0;
-       entry->hi.val = irte->hi.val;
-       entry->lo.val = irte->lo.val;
-       entry->lo.fields_remap.valid = 1;
-       if (data)
-               data->ref = entry;
-
-       raw_spin_unlock_irqrestore(&table->lock, flags);
-
-       iommu_flush_irt(iommu, devid);
-       iommu_completion_wait(iommu);
-
-       return 0;
-}
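
A 128-bit guest-APIC IRTE cannot be rewritten with a single atomic store, so the update above clears the valid bit first, rewrites both halves, and only then marks the entry valid again, ensuring the hardware never consumes a half-written entry. A minimal sketch of that sequence (no memory barriers, purely illustrative):

#include <stdint.h>

struct wide_entry {
        uint64_t lo;    /* bit 0 plays the role of the 'valid' bit here */
        uint64_t hi;
};

static void update_wide_entry(volatile struct wide_entry *e,
                              uint64_t new_lo, uint64_t new_hi)
{
        e->lo &= ~1ULL;         /* 1. invalidate so readers skip the entry */
        e->hi = new_hi;         /* 2. rewrite both halves */
        e->lo = new_lo;
        e->lo |= 1ULL;          /* 3. publish the new contents */
}

int main(void)
{
        struct wide_entry e = { 0, 0 };

        update_wide_entry(&e, 0x42, 0x1);
        return (e.lo & 1) ? 0 : 1;
}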
-
-static int modify_irte(u16 devid, int index, union irte *irte)
-{
-       struct irq_remap_table *table;
-       struct amd_iommu *iommu;
-       unsigned long flags;
-
-       iommu = amd_iommu_rlookup_table[devid];
-       if (iommu == NULL)
-               return -EINVAL;
-
-       table = get_irq_table(devid);
-       if (!table)
-               return -ENOMEM;
-
-       raw_spin_lock_irqsave(&table->lock, flags);
-       table->table[index] = irte->val;
-       raw_spin_unlock_irqrestore(&table->lock, flags);
-
-       iommu_flush_irt(iommu, devid);
-       iommu_completion_wait(iommu);
-
-       return 0;
-}
-
-static void free_irte(u16 devid, int index)
-{
-       struct irq_remap_table *table;
-       struct amd_iommu *iommu;
-       unsigned long flags;
-
-       iommu = amd_iommu_rlookup_table[devid];
-       if (iommu == NULL)
-               return;
-
-       table = get_irq_table(devid);
-       if (!table)
-               return;
-
-       raw_spin_lock_irqsave(&table->lock, flags);
-       iommu->irte_ops->clear_allocated(table, index);
-       raw_spin_unlock_irqrestore(&table->lock, flags);
-
-       iommu_flush_irt(iommu, devid);
-       iommu_completion_wait(iommu);
-}
-
-static void irte_prepare(void *entry,
-                        u32 delivery_mode, u32 dest_mode,
-                        u8 vector, u32 dest_apicid, int devid)
-{
-       union irte *irte = (union irte *) entry;
-
-       irte->val                = 0;
-       irte->fields.vector      = vector;
-       irte->fields.int_type    = delivery_mode;
-       irte->fields.destination = dest_apicid;
-       irte->fields.dm          = dest_mode;
-       irte->fields.valid       = 1;
-}
-
-static void irte_ga_prepare(void *entry,
-                           u32 delivery_mode, u32 dest_mode,
-                           u8 vector, u32 dest_apicid, int devid)
-{
-       struct irte_ga *irte = (struct irte_ga *) entry;
-
-       irte->lo.val                      = 0;
-       irte->hi.val                      = 0;
-       irte->lo.fields_remap.int_type    = delivery_mode;
-       irte->lo.fields_remap.dm          = dest_mode;
-       irte->hi.fields.vector            = vector;
-       irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
-       irte->hi.fields.destination       = APICID_TO_IRTE_DEST_HI(dest_apicid);
-       irte->lo.fields_remap.valid       = 1;
-}
-
-static void irte_activate(void *entry, u16 devid, u16 index)
-{
-       union irte *irte = (union irte *) entry;
-
-       irte->fields.valid = 1;
-       modify_irte(devid, index, irte);
-}
-
-static void irte_ga_activate(void *entry, u16 devid, u16 index)
-{
-       struct irte_ga *irte = (struct irte_ga *) entry;
-
-       irte->lo.fields_remap.valid = 1;
-       modify_irte_ga(devid, index, irte, NULL);
-}
-
-static void irte_deactivate(void *entry, u16 devid, u16 index)
-{
-       union irte *irte = (union irte *) entry;
-
-       irte->fields.valid = 0;
-       modify_irte(devid, index, irte);
-}
-
-static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
-{
-       struct irte_ga *irte = (struct irte_ga *) entry;
-
-       irte->lo.fields_remap.valid = 0;
-       modify_irte_ga(devid, index, irte, NULL);
-}
-
-static void irte_set_affinity(void *entry, u16 devid, u16 index,
-                             u8 vector, u32 dest_apicid)
-{
-       union irte *irte = (union irte *) entry;
-
-       irte->fields.vector = vector;
-       irte->fields.destination = dest_apicid;
-       modify_irte(devid, index, irte);
-}
-
-static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
-                                u8 vector, u32 dest_apicid)
-{
-       struct irte_ga *irte = (struct irte_ga *) entry;
-
-       if (!irte->lo.fields_remap.guest_mode) {
-               irte->hi.fields.vector = vector;
-               irte->lo.fields_remap.destination =
-                                       APICID_TO_IRTE_DEST_LO(dest_apicid);
-               irte->hi.fields.destination =
-                                       APICID_TO_IRTE_DEST_HI(dest_apicid);
-               modify_irte_ga(devid, index, irte, NULL);
-       }
-}
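
In the 128-bit GA format the destination APIC ID no longer fits in a single field, so irte_ga_set_affinity() splits it across the low and high destination fields with APICID_TO_IRTE_DEST_LO/HI. A small sketch of such a split is shown below; the 24-bit/8-bit partition used here is an assumption for illustration, not something stated in this diff.

#include <stdio.h>
#include <stdint.h>

static uint32_t dest_lo(uint32_t apicid) { return apicid & 0xffffff; }      /* assumed low 24 bits */
static uint32_t dest_hi(uint32_t apicid) { return (apicid >> 24) & 0xff; }  /* assumed high 8 bits */

int main(void)
{
        uint32_t apicid = 0x12345678;

        /* prints lo=0x345678 hi=0x12 */
        printf("lo=0x%06x hi=0x%02x\n", dest_lo(apicid), dest_hi(apicid));
        return 0;
}
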
-
-#define IRTE_ALLOCATED (~1U)
-static void irte_set_allocated(struct irq_remap_table *table, int index)
-{
-       table->table[index] = IRTE_ALLOCATED;
-}
-
-static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
-{
-       struct irte_ga *ptr = (struct irte_ga *)table->table;
-       struct irte_ga *irte = &ptr[index];
-
-       memset(&irte->lo.val, 0, sizeof(u64));
-       memset(&irte->hi.val, 0, sizeof(u64));
-       irte->hi.fields.vector = 0xff;
-}
-
-static bool irte_is_allocated(struct irq_remap_table *table, int index)
-{
-       union irte *ptr = (union irte *)table->table;
-       union irte *irte = &ptr[index];
-
-       return irte->val != 0;
-}
-
-static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
-{
-       struct irte_ga *ptr = (struct irte_ga *)table->table;
-       struct irte_ga *irte = &ptr[index];
-
-       return irte->hi.fields.vector != 0;
-}
-
-static void irte_clear_allocated(struct irq_remap_table *table, int index)
-{
-       table->table[index] = 0;
-}
-
-static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
-{
-       struct irte_ga *ptr = (struct irte_ga *)table->table;
-       struct irte_ga *irte = &ptr[index];
-
-       memset(&irte->lo.val, 0, sizeof(u64));
-       memset(&irte->hi.val, 0, sizeof(u64));
-}
-
-static int get_devid(struct irq_alloc_info *info)
-{
-       int devid = -1;
-
-       switch (info->type) {
-       case X86_IRQ_ALLOC_TYPE_IOAPIC:
-               devid     = get_ioapic_devid(info->ioapic_id);
-               break;
-       case X86_IRQ_ALLOC_TYPE_HPET:
-               devid     = get_hpet_devid(info->hpet_id);
-               break;
-       case X86_IRQ_ALLOC_TYPE_MSI:
-       case X86_IRQ_ALLOC_TYPE_MSIX:
-               devid = get_device_id(&info->msi_dev->dev);
-               break;
-       default:
-               BUG_ON(1);
-               break;
-       }
-
-       return devid;
-}
-
-static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
-{
-       struct amd_iommu *iommu;
-       int devid;
-
-       if (!info)
-               return NULL;
-
-       devid = get_devid(info);
-       if (devid >= 0) {
-               iommu = amd_iommu_rlookup_table[devid];
-               if (iommu)
-                       return iommu->ir_domain;
-       }
-
-       return NULL;
-}
-
-static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
-{
-       struct amd_iommu *iommu;
-       int devid;
-
-       if (!info)
-               return NULL;
-
-       switch (info->type) {
-       case X86_IRQ_ALLOC_TYPE_MSI:
-       case X86_IRQ_ALLOC_TYPE_MSIX:
-               devid = get_device_id(&info->msi_dev->dev);
-               if (devid < 0)
-                       return NULL;
-
-               iommu = amd_iommu_rlookup_table[devid];
-               if (iommu)
-                       return iommu->msi_domain;
-               break;
-       default:
-               break;
-       }
-
-       return NULL;
-}
-
-struct irq_remap_ops amd_iommu_irq_ops = {
-       .prepare                = amd_iommu_prepare,
-       .enable                 = amd_iommu_enable,
-       .disable                = amd_iommu_disable,
-       .reenable               = amd_iommu_reenable,
-       .enable_faulting        = amd_iommu_enable_faulting,
-       .get_ir_irq_domain      = get_ir_irq_domain,
-       .get_irq_domain         = get_irq_domain,
-};
-
-static void irq_remapping_prepare_irte(struct amd_ir_data *data,
-                                      struct irq_cfg *irq_cfg,
-                                      struct irq_alloc_info *info,
-                                      int devid, int index, int sub_handle)
-{
-       struct irq_2_irte *irte_info = &data->irq_2_irte;
-       struct msi_msg *msg = &data->msi_entry;
-       struct IO_APIC_route_entry *entry;
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
-       if (!iommu)
-               return;
-
-       data->irq_2_irte.devid = devid;
-       data->irq_2_irte.index = index + sub_handle;
-       iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
-                                apic->irq_dest_mode, irq_cfg->vector,
-                                irq_cfg->dest_apicid, devid);
-
-       switch (info->type) {
-       case X86_IRQ_ALLOC_TYPE_IOAPIC:
-               /* Setup IOAPIC entry */
-               entry = info->ioapic_entry;
-               info->ioapic_entry = NULL;
-               memset(entry, 0, sizeof(*entry));
-               entry->vector        = index;
-               entry->mask          = 0;
-               entry->trigger       = info->ioapic_trigger;
-               entry->polarity      = info->ioapic_polarity;
-               /* Mask level triggered irqs. */
-               if (info->ioapic_trigger)
-                       entry->mask = 1;
-               break;
-
-       case X86_IRQ_ALLOC_TYPE_HPET:
-       case X86_IRQ_ALLOC_TYPE_MSI:
-       case X86_IRQ_ALLOC_TYPE_MSIX:
-               msg->address_hi = MSI_ADDR_BASE_HI;
-               msg->address_lo = MSI_ADDR_BASE_LO;
-               msg->data = irte_info->index;
-               break;
-
-       default:
-               BUG_ON(1);
-               break;
-       }
-}
-
-struct amd_irte_ops irte_32_ops = {
-       .prepare = irte_prepare,
-       .activate = irte_activate,
-       .deactivate = irte_deactivate,
-       .set_affinity = irte_set_affinity,
-       .set_allocated = irte_set_allocated,
-       .is_allocated = irte_is_allocated,
-       .clear_allocated = irte_clear_allocated,
-};
-
-struct amd_irte_ops irte_128_ops = {
-       .prepare = irte_ga_prepare,
-       .activate = irte_ga_activate,
-       .deactivate = irte_ga_deactivate,
-       .set_affinity = irte_ga_set_affinity,
-       .set_allocated = irte_ga_set_allocated,
-       .is_allocated = irte_ga_is_allocated,
-       .clear_allocated = irte_ga_clear_allocated,
-};
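
irte_32_ops and irte_128_ops gather every format-specific IRTE operation behind one table of function pointers, so the rest of the driver only ever calls iommu->irte_ops->... and the 32-bit vs. 128-bit decision is taken once at init time. A minimal sketch of that dispatch pattern follows; all names in it are made up for illustration.

#include <stdio.h>

struct fmt_ops {
        void (*prepare)(void *entry, int vector);
};

static void prepare_32(void *entry, int vector)
{
        (void)entry;
        printf("32-bit IRTE, vector %d\n", vector);
}

static void prepare_128(void *entry, int vector)
{
        (void)entry;
        printf("128-bit IRTE, vector %d\n", vector);
}

static const struct fmt_ops irte_small_ops = { .prepare = prepare_32 };
static const struct fmt_ops irte_large_ops = { .prepare = prepare_128 };

int main(void)
{
        int guest_mode_ga = 1;  /* stands in for the AMD_IOMMU_GUEST_IR_GA() check */
        const struct fmt_ops *ops = guest_mode_ga ? &irte_large_ops
                                                  : &irte_small_ops;

        ops->prepare(NULL, 32); /* call sites never test the format again */
        return 0;
}
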
-
-static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
-                              unsigned int nr_irqs, void *arg)
-{
-       struct irq_alloc_info *info = arg;
-       struct irq_data *irq_data;
-       struct amd_ir_data *data = NULL;
-       struct irq_cfg *cfg;
-       int i, ret, devid;
-       int index;
-
-       if (!info)
-               return -EINVAL;
-       if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
-           info->type != X86_IRQ_ALLOC_TYPE_MSIX)
-               return -EINVAL;
-
-       /*
-        * With IRQ remapping enabled, we don't need contiguous CPU vectors
-        * With IRQ remapping enabled, we don't need contiguous CPU vectors
-        * to support multiple MSI interrupts.
-        */
-       if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
-               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
-
-       devid = get_devid(info);
-       if (devid < 0)
-               return -EINVAL;
-
-       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
-       if (ret < 0)
-               return ret;
-
-       if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
-               struct irq_remap_table *table;
-               struct amd_iommu *iommu;
-
-               table = alloc_irq_table(devid, NULL);
-               if (table) {
-                       if (!table->min_index) {
-                               /*
-                                * Keep the first 32 indexes free for IOAPIC
-                                * interrupts.
-                                */
-                               table->min_index = 32;
-                               iommu = amd_iommu_rlookup_table[devid];
-                               for (i = 0; i < 32; ++i)
-                                       iommu->irte_ops->set_allocated(table, i);
-                       }
-                       WARN_ON(table->min_index != 32);
-                       index = info->ioapic_pin;
-               } else {
-                       index = -ENOMEM;
-               }
-       } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
-                  info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
-               bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
-
-               index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
-       } else {
-               index = alloc_irq_index(devid, nr_irqs, false, NULL);
-       }
-
-       if (index < 0) {
-               pr_warn("Failed to allocate IRTE\n");
-               ret = index;
-               goto out_free_parent;
-       }
-
-       for (i = 0; i < nr_irqs; i++) {
-               irq_data = irq_domain_get_irq_data(domain, virq + i);
-               cfg = irqd_cfg(irq_data);
-               if (!irq_data || !cfg) {
-                       ret = -EINVAL;
-                       goto out_free_data;
-               }
-
-               ret = -ENOMEM;
-               data = kzalloc(sizeof(*data), GFP_KERNEL);
-               if (!data)
-                       goto out_free_data;
-
-               if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-                       data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
-               else
-                       data->entry = kzalloc(sizeof(struct irte_ga),
-                                                    GFP_KERNEL);
-               if (!data->entry) {
-                       kfree(data);
-                       goto out_free_data;
-               }
-
-               irq_data->hwirq = (devid << 16) + i;
-               irq_data->chip_data = data;
-               irq_data->chip = &amd_ir_chip;
-               irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
-               irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
-       }
-
-       return 0;
-
-out_free_data:
-       for (i--; i >= 0; i--) {
-               irq_data = irq_domain_get_irq_data(domain, virq + i);
-               if (irq_data)
-                       kfree(irq_data->chip_data);
-       }
-       for (i = 0; i < nr_irqs; i++)
-               free_irte(devid, index + i);
-out_free_parent:
-       irq_domain_free_irqs_common(domain, virq, nr_irqs);
-       return ret;
-}
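
The error path above unwinds only what was actually set up: the per-interrupt chip_data allocated so far (walking backwards from i), then every IRTE in the range, then the parent-domain allocation. A small stand-alone sketch of that backwards unwind, with the failure point simulated:

#include <stdlib.h>
#include <stdio.h>

#define N 4

int main(void)
{
        void *chip_data[N] = { NULL };
        int i, fail_at = 2;                     /* simulate a mid-loop failure */

        for (i = 0; i < N; i++) {
                if (i == fail_at)
                        goto out_free_data;
                chip_data[i] = malloc(16);
                if (!chip_data[i])
                        goto out_free_data;
        }
        printf("all %d allocated\n", N);
        return 0;

out_free_data:
        for (i--; i >= 0; i--)                  /* same backwards walk as above */
                free(chip_data[i]);
        printf("unwound after %d\n", fail_at);
        return 1;
}
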
-
-static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
-                              unsigned int nr_irqs)
-{
-       struct irq_2_irte *irte_info;
-       struct irq_data *irq_data;
-       struct amd_ir_data *data;
-       int i;
-
-       for (i = 0; i < nr_irqs; i++) {
-               irq_data = irq_domain_get_irq_data(domain, virq  + i);
-               if (irq_data && irq_data->chip_data) {
-                       data = irq_data->chip_data;
-                       irte_info = &data->irq_2_irte;
-                       free_irte(irte_info->devid, irte_info->index);
-                       kfree(data->entry);
-                       kfree(data);
-               }
-       }
-       irq_domain_free_irqs_common(domain, virq, nr_irqs);
-}
-
-static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
-                              struct amd_ir_data *ir_data,
-                              struct irq_2_irte *irte_info,
-                              struct irq_cfg *cfg);
-
-static int irq_remapping_activate(struct irq_domain *domain,
-                                 struct irq_data *irq_data, bool reserve)
-{
-       struct amd_ir_data *data = irq_data->chip_data;
-       struct irq_2_irte *irte_info = &data->irq_2_irte;
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
-       struct irq_cfg *cfg = irqd_cfg(irq_data);
-
-       if (!iommu)
-               return 0;
-
-       iommu->irte_ops->activate(data->entry, irte_info->devid,
-                                 irte_info->index);
-       amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
-       return 0;
-}
-
-static void irq_remapping_deactivate(struct irq_domain *domain,
-                                    struct irq_data *irq_data)
-{
-       struct amd_ir_data *data = irq_data->chip_data;
-       struct irq_2_irte *irte_info = &data->irq_2_irte;
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
-
-       if (iommu)
-               iommu->irte_ops->deactivate(data->entry, irte_info->devid,
-                                           irte_info->index);
-}
-
-static const struct irq_domain_ops amd_ir_domain_ops = {
-       .alloc = irq_remapping_alloc,
-       .free = irq_remapping_free,
-       .activate = irq_remapping_activate,
-       .deactivate = irq_remapping_deactivate,
-};
-
-int amd_iommu_activate_guest_mode(void *data)
-{
-       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-           !entry || entry->lo.fields_vapic.guest_mode)
-               return 0;
-
-       entry->lo.val = 0;
-       entry->hi.val = 0;
-
-       entry->lo.fields_vapic.guest_mode  = 1;
-       entry->lo.fields_vapic.ga_log_intr = 1;
-       entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
-       entry->hi.fields.vector            = ir_data->ga_vector;
-       entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
-
-       return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, ir_data);
-}
-EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
-
-int amd_iommu_deactivate_guest_mode(void *data)
-{
-       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-       struct irq_cfg *cfg = ir_data->cfg;
-
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-           !entry || !entry->lo.fields_vapic.guest_mode)
-               return 0;
-
-       entry->lo.val = 0;
-       entry->hi.val = 0;
-
-       entry->lo.fields_remap.dm          = apic->irq_dest_mode;
-       entry->lo.fields_remap.int_type    = apic->irq_delivery_mode;
-       entry->hi.fields.vector            = cfg->vector;
-       entry->lo.fields_remap.destination =
-                               APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
-       entry->hi.fields.destination =
-                               APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
-
-       return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, ir_data);
-}
-EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
-
-static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
-{
-       int ret;
-       struct amd_iommu *iommu;
-       struct amd_iommu_pi_data *pi_data = vcpu_info;
-       struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
-       struct amd_ir_data *ir_data = data->chip_data;
-       struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
-       struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
-
-       /* Note:
-        * This device has never been set up for guest mode.
-        * We should not modify the IRTE.
-        */
-       if (!dev_data || !dev_data->use_vapic)
-               return 0;
-
-       ir_data->cfg = irqd_cfg(data);
-       pi_data->ir_data = ir_data;
-
-       /* Note:
-        * SVM tries to set up for VAPIC mode, but we are in
-        * legacy mode. So, we force legacy mode instead.
-        */
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
-               pr_debug("%s: Fall back to using intr legacy remap\n",
-                        __func__);
-               pi_data->is_guest_mode = false;
-       }
-
-       iommu = amd_iommu_rlookup_table[irte_info->devid];
-       if (iommu == NULL)
-               return -EINVAL;
-
-       pi_data->prev_ga_tag = ir_data->cached_ga_tag;
-       if (pi_data->is_guest_mode) {
-               ir_data->ga_root_ptr = (pi_data->base >> 12);
-               ir_data->ga_vector = vcpu_pi_info->vector;
-               ir_data->ga_tag = pi_data->ga_tag;
-               ret = amd_iommu_activate_guest_mode(ir_data);
-               if (!ret)
-                       ir_data->cached_ga_tag = pi_data->ga_tag;
-       } else {
-               ret = amd_iommu_deactivate_guest_mode(ir_data);
-
-               /*
-                * This communicates the ga_tag back to the caller
-                * so that it can do all the necessary clean up.
-                */
-               if (!ret)
-                       ir_data->cached_ga_tag = 0;
-       }
-
-       return ret;
-}
-
-
-static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
-                              struct amd_ir_data *ir_data,
-                              struct irq_2_irte *irte_info,
-                              struct irq_cfg *cfg)
-{
-
-       /*
-        * Atomically updates the IRTE with the new destination and vector,
-        * and flushes the interrupt entry cache.
-        */
-       iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
-                                     irte_info->index, cfg->vector,
-                                     cfg->dest_apicid);
-}
-
-static int amd_ir_set_affinity(struct irq_data *data,
-                              const struct cpumask *mask, bool force)
-{
-       struct amd_ir_data *ir_data = data->chip_data;
-       struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
-       struct irq_cfg *cfg = irqd_cfg(data);
-       struct irq_data *parent = data->parent_data;
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
-       int ret;
-
-       if (!iommu)
-               return -ENODEV;
-
-       ret = parent->chip->irq_set_affinity(parent, mask, force);
-       if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
-               return ret;
-
-       amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
-       /*
-        * After this point, all the interrupts will start arriving
-        * at the new destination. So, time to clean up the previous
-        * vector allocation.
-        */
-       send_cleanup_vector(cfg);
-
-       return IRQ_SET_MASK_OK_DONE;
-}
-
-static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
-{
-       struct amd_ir_data *ir_data = irq_data->chip_data;
-
-       *msg = ir_data->msi_entry;
-}
-
-static struct irq_chip amd_ir_chip = {
-       .name                   = "AMD-IR",
-       .irq_ack                = apic_ack_irq,
-       .irq_set_affinity       = amd_ir_set_affinity,
-       .irq_set_vcpu_affinity  = amd_ir_set_vcpu_affinity,
-       .irq_compose_msi_msg    = ir_compose_msi_msg,
-};
-
-int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
-{
-       struct fwnode_handle *fn;
-
-       fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
-       if (!fn)
-               return -ENOMEM;
-       iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
-       irq_domain_free_fwnode(fn);
-       if (!iommu->ir_domain)
-               return -ENOMEM;
-
-       iommu->ir_domain->parent = arch_get_ir_parent_domain();
-       iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
-                                                            "AMD-IR-MSI",
-                                                            iommu->index);
-       return 0;
-}
-
-int amd_iommu_update_ga(int cpu, bool is_run, void *data)
-{
-       unsigned long flags;
-       struct amd_iommu *iommu;
-       struct irq_remap_table *table;
-       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-       int devid = ir_data->irq_2_irte.devid;
-       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-       struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
-
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-           !ref || !entry || !entry->lo.fields_vapic.guest_mode)
-               return 0;
-
-       iommu = amd_iommu_rlookup_table[devid];
-       if (!iommu)
-               return -ENODEV;
-
-       table = get_irq_table(devid);
-       if (!table)
-               return -ENODEV;
-
-       raw_spin_lock_irqsave(&table->lock, flags);
-
-       if (ref->lo.fields_vapic.guest_mode) {
-               if (cpu >= 0) {
-                       ref->lo.fields_vapic.destination =
-                                               APICID_TO_IRTE_DEST_LO(cpu);
-                       ref->hi.fields.destination =
-                                               APICID_TO_IRTE_DEST_HI(cpu);
-               }
-               ref->lo.fields_vapic.is_run = is_run;
-               barrier();
-       }
-
-       raw_spin_unlock_irqrestore(&table->lock, flags);
-
-       iommu_flush_irt(iommu, devid);
-       iommu_completion_wait(iommu);
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_update_ga);
-#endif
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
deleted file mode 100644 (file)
index f892992..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#ifndef AMD_IOMMU_H
-#define AMD_IOMMU_H
-
-#include <linux/iommu.h>
-
-#include "amd_iommu_types.h"
-
-extern int amd_iommu_get_num_iommus(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
-
-#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
-#else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
-#endif
-
-/* Needed for interrupt remapping */
-extern int amd_iommu_prepare(void);
-extern int amd_iommu_enable(void);
-extern void amd_iommu_disable(void);
-extern int amd_iommu_reenable(int);
-extern int amd_iommu_enable_faulting(void);
-extern int amd_iommu_guest_ir;
-
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
-extern bool amd_iommu_v2_supported(void);
-extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
-                               u64 address);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
-                                    unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
-extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
-
-#ifdef CONFIG_IRQ_REMAP
-extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
-#else
-static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
-{
-       return 0;
-}
-#endif
-
-#define PPR_SUCCESS                    0x0
-#define PPR_INVALID                    0x1
-#define PPR_FAILURE                    0xf
-
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
-                                 int status, int tag);
-
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
-       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
-              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
-{
-       if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
-               return false;
-
-       return !!(iommu->features & f);
-}
-
-static inline u64 iommu_virt_to_phys(void *vaddr)
-{
-       return (u64)__sme_set(virt_to_phys(vaddr));
-}
-
-static inline void *iommu_phys_to_virt(unsigned long paddr)
-{
-       return phys_to_virt(__sme_clr(paddr));
-}
-
-extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-                                        struct device *dev);
-extern int __init add_special_device(u8 type, u8 id, u16 *devid,
-                                    bool cmd_line);
-
-#ifdef CONFIG_DMI
-void amd_iommu_apply_ivrs_quirks(void);
-#else
-static inline void amd_iommu_apply_ivrs_quirks(void) { }
-#endif
-
-#endif
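
Several declarations in this header follow the same compile-out pattern: when the corresponding Kconfig option is disabled, a static inline stub is provided so call sites build unchanged and the call folds away. A tiny sketch of the pattern outside the kernel, where HAVE_DEBUGFS and debugfs_setup() are made-up names:

#include <stdio.h>

/* #define HAVE_DEBUGFS 1 */

#ifdef HAVE_DEBUGFS
void debugfs_setup(void) { printf("debugfs enabled\n"); }
#else
static inline void debugfs_setup(void) { }      /* compiles away entirely */
#endif

int main(void)
{
        debugfs_setup();        /* no #ifdef needed at the call site */
        return 0;
}
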
diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c
deleted file mode 100644 (file)
index 545372f..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * AMD IOMMU driver
- *
- * Copyright (C) 2018 Advanced Micro Devices, Inc.
- *
- * Author: Gary R Hook <gary.hook@amd.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/pci.h>
-
-#include "amd_iommu.h"
-
-static struct dentry *amd_iommu_debugfs;
-static DEFINE_MUTEX(amd_iommu_debugfs_lock);
-
-#define        MAX_NAME_LEN    20
-
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
-{
-       char name[MAX_NAME_LEN + 1];
-
-       mutex_lock(&amd_iommu_debugfs_lock);
-       if (!amd_iommu_debugfs)
-               amd_iommu_debugfs = debugfs_create_dir("amd",
-                                                      iommu_debugfs_dir);
-       mutex_unlock(&amd_iommu_debugfs_lock);
-
-       snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
-       iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
-}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
deleted file mode 100644 (file)
index 3faff7f..0000000
+++ /dev/null
@@ -1,3174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- *         Leo Duran <leo.duran@amd.com>
- */
-
-#define pr_fmt(fmt)     "AMD-Vi: " fmt
-#define dev_fmt(fmt)    pr_fmt(fmt)
-
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <linux/list.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/syscore_ops.h>
-#include <linux/interrupt.h>
-#include <linux/msi.h>
-#include <linux/amd-iommu.h>
-#include <linux/export.h>
-#include <linux/kmemleak.h>
-#include <linux/mem_encrypt.h>
-#include <asm/pci-direct.h>
-#include <asm/iommu.h>
-#include <asm/apic.h>
-#include <asm/msidef.h>
-#include <asm/gart.h>
-#include <asm/x86_init.h>
-#include <asm/iommu_table.h>
-#include <asm/io_apic.h>
-#include <asm/irq_remapping.h>
-
-#include <linux/crash_dump.h>
-
-#include "amd_iommu.h"
-#include "irq_remapping.h"
-
-/*
- * definitions for the ACPI scanning code
- */
-#define IVRS_HEADER_LENGTH 48
-
-#define ACPI_IVHD_TYPE_MAX_SUPPORTED   0x40
-#define ACPI_IVMD_TYPE_ALL              0x20
-#define ACPI_IVMD_TYPE                  0x21
-#define ACPI_IVMD_TYPE_RANGE            0x22
-
-#define IVHD_DEV_ALL                    0x01
-#define IVHD_DEV_SELECT                 0x02
-#define IVHD_DEV_SELECT_RANGE_START     0x03
-#define IVHD_DEV_RANGE_END              0x04
-#define IVHD_DEV_ALIAS                  0x42
-#define IVHD_DEV_ALIAS_RANGE            0x43
-#define IVHD_DEV_EXT_SELECT             0x46
-#define IVHD_DEV_EXT_SELECT_RANGE       0x47
-#define IVHD_DEV_SPECIAL               0x48
-#define IVHD_DEV_ACPI_HID              0xf0
-
-#define UID_NOT_PRESENT                 0
-#define UID_IS_INTEGER                  1
-#define UID_IS_CHARACTER                2
-
-#define IVHD_SPECIAL_IOAPIC            1
-#define IVHD_SPECIAL_HPET              2
-
-#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
-#define IVHD_FLAG_PASSPW_EN_MASK        0x02
-#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
-#define IVHD_FLAG_ISOC_EN_MASK          0x08
-
-#define IVMD_FLAG_EXCL_RANGE            0x08
-#define IVMD_FLAG_IW                    0x04
-#define IVMD_FLAG_IR                    0x02
-#define IVMD_FLAG_UNITY_MAP             0x01
-
-#define ACPI_DEVFLAG_INITPASS           0x01
-#define ACPI_DEVFLAG_EXTINT             0x02
-#define ACPI_DEVFLAG_NMI                0x04
-#define ACPI_DEVFLAG_SYSMGT1            0x10
-#define ACPI_DEVFLAG_SYSMGT2            0x20
-#define ACPI_DEVFLAG_LINT0              0x40
-#define ACPI_DEVFLAG_LINT1              0x80
-#define ACPI_DEVFLAG_ATSDIS             0x10000000
-
-#define LOOP_TIMEOUT   100000
-/*
- * ACPI table definitions
- *
- * These data structures are laid over the table to parse the important values
- * out of it.
- */
-
-extern const struct iommu_ops amd_iommu_ops;
-
-/*
- * structure describing one IOMMU in the ACPI table. Typically followed by one
- * or more ivhd_entrys.
- */
-struct ivhd_header {
-       u8 type;
-       u8 flags;
-       u16 length;
-       u16 devid;
-       u16 cap_ptr;
-       u64 mmio_phys;
-       u16 pci_seg;
-       u16 info;
-       u32 efr_attr;
-
-       /* Following only valid on IVHD type 11h and 40h */
-       u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
-       u64 res;
-} __attribute__((packed));
-
-/*
- * A device entry describing which devices a specific IOMMU translates and
- * which requestor ids they use.
- */
-struct ivhd_entry {
-       u8 type;
-       u16 devid;
-       u8 flags;
-       u32 ext;
-       u32 hidh;
-       u64 cid;
-       u8 uidf;
-       u8 uidl;
-       u8 uid;
-} __attribute__((packed));
-
-/*
- * An AMD IOMMU memory definition structure. It defines things like exclusion
- * ranges for devices and regions that should be unity mapped.
- */
-struct ivmd_header {
-       u8 type;
-       u8 flags;
-       u16 length;
-       u16 devid;
-       u16 aux;
-       u64 resv;
-       u64 range_start;
-       u64 range_length;
-} __attribute__((packed));
-
-bool amd_iommu_dump;
-bool amd_iommu_irq_remap __read_mostly;
-
-int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
-
-static bool amd_iommu_detected;
-static bool __initdata amd_iommu_disabled;
-static int amd_iommu_target_ivhd_type;
-
-u16 amd_iommu_last_bdf;                        /* largest PCI device id we have
-                                          to handle */
-LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
-                                          we find in ACPI */
-bool amd_iommu_unmap_flush;            /* if true, flush on every unmap */
-
-LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
-                                          system */
-
-/* Array to assign indices to IOMMUs */
-struct amd_iommu *amd_iommus[MAX_IOMMUS];
-
-/* Number of IOMMUs present in the system */
-static int amd_iommus_present;
-
-/* IOMMUs have a non-present cache? */
-bool amd_iommu_np_cache __read_mostly;
-bool amd_iommu_iotlb_sup __read_mostly = true;
-
-u32 amd_iommu_max_pasid __read_mostly = ~0;
-
-bool amd_iommu_v2_present __read_mostly;
-static bool amd_iommu_pc_present __read_mostly;
-
-bool amd_iommu_force_isolation __read_mostly;
-
-/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-/*
- * Pointer to a device table to which the content of the old device
- * table will be copied. It is only used in the kdump kernel.
- */
-static struct dev_table_entry *old_dev_tbl_cpy;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
-/*
- * The rlookup table is used to find the IOMMU which is responsible
- * for a specific device. It is also indexed by the PCI device id.
- */
-struct amd_iommu **amd_iommu_rlookup_table;
-EXPORT_SYMBOL(amd_iommu_rlookup_table);
-
-/*
- * This table is used to find the irq remapping table for a given device id
- * quickly.
- */
-struct irq_remap_table **irq_lookup_table;
-
-/*
- * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
- * to know which ones are already in use.
- */
-unsigned long *amd_iommu_pd_alloc_bitmap;
-
-static u32 dev_table_size;     /* size of the device table */
-static u32 alias_table_size;   /* size of the alias table */
-static u32 rlookup_table_size; /* size of the rlookup table */
-
-enum iommu_init_state {
-       IOMMU_START_STATE,
-       IOMMU_IVRS_DETECTED,
-       IOMMU_ACPI_FINISHED,
-       IOMMU_ENABLED,
-       IOMMU_PCI_INIT,
-       IOMMU_INTERRUPTS_EN,
-       IOMMU_DMA_OPS,
-       IOMMU_INITIALIZED,
-       IOMMU_NOT_FOUND,
-       IOMMU_INIT_ERROR,
-       IOMMU_CMDLINE_DISABLED,
-};
-
-/* Early ioapic and hpet maps from kernel command line */
-#define EARLY_MAP_SIZE         4
-static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
-static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
-static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
-
-static int __initdata early_ioapic_map_size;
-static int __initdata early_hpet_map_size;
-static int __initdata early_acpihid_map_size;
-
-static bool __initdata cmdline_maps;
-
-static enum iommu_init_state init_state = IOMMU_START_STATE;
-
-static int amd_iommu_enable_interrupts(void);
-static int __init iommu_go_to_state(enum iommu_init_state state);
-static void init_device_table_dma(void);
-
-static bool amd_iommu_pre_enabled = true;
-
-bool translation_pre_enabled(struct amd_iommu *iommu)
-{
-       return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
-}
-EXPORT_SYMBOL(translation_pre_enabled);
-
-static void clear_translation_pre_enabled(struct amd_iommu *iommu)
-{
-       iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
-}
-
-static void init_translation_status(struct amd_iommu *iommu)
-{
-       u64 ctrl;
-
-       ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
-       if (ctrl & (1<<CONTROL_IOMMU_EN))
-               iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
-}
-
-static inline void update_last_devid(u16 devid)
-{
-       if (devid > amd_iommu_last_bdf)
-               amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
-{
-       unsigned shift = PAGE_SHIFT +
-                        get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
-
-       return 1UL << shift;
-}
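
tbl_size() rounds the space needed for (amd_iommu_last_bdf + 1) fixed-size entries up to a whole power-of-two number of pages, matching what a get_order()-based page allocation hands back. A user-space sketch of the same rounding; the 32-byte entry size in the example is only an assumption for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int order_of(unsigned long pages)
{
        unsigned int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

static unsigned long tbl_size(unsigned long last_bdf, int entry_size)
{
        unsigned long bytes = (last_bdf + 1) * entry_size;
        unsigned long pages = (bytes + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

        return 1UL << (PAGE_SHIFT + order_of(pages));
}

int main(void)
{
        /* 0x10000 entries of 32 bytes each -> prints 2097152 (2 MiB) */
        printf("%lu\n", tbl_size(0xffff, 32));
        return 0;
}
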
-
-int amd_iommu_get_num_iommus(void)
-{
-       return amd_iommus_present;
-}
-
-/* Access to l1 and l2 indexed register spaces */
-
-static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
-{
-       u32 val;
-
-       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
-       pci_read_config_dword(iommu->dev, 0xfc, &val);
-       return val;
-}
-
-static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
-{
-       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
-       pci_write_config_dword(iommu->dev, 0xfc, val);
-       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
-}
-
-static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
-{
-       u32 val;
-
-       pci_write_config_dword(iommu->dev, 0xf0, address);
-       pci_read_config_dword(iommu->dev, 0xf4, &val);
-       return val;
-}
-
-static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
-{
-       pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
-       pci_write_config_dword(iommu->dev, 0xf4, val);
-}
-
-/****************************************************************************
- *
- * AMD IOMMU MMIO register space handling functions
- *
- * These functions are used to program the IOMMU device registers in
- * MMIO space required for that driver.
- *
- ****************************************************************************/
-
-/*
- * This function sets the exclusion range in the IOMMU. DMA accesses to the
- * exclusion range are passed through untranslated.
- */
-static void iommu_set_exclusion_range(struct amd_iommu *iommu)
-{
-       u64 start = iommu->exclusion_start & PAGE_MASK;
-       u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
-       u64 entry;
-
-       if (!iommu->exclusion_start)
-               return;
-
-       entry = start | MMIO_EXCL_ENABLE_MASK;
-       memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
-                       &entry, sizeof(entry));
-
-       entry = limit;
-       memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
-                       &entry, sizeof(entry));
-}
-
-/* Programs the physical address of the device table into the IOMMU hardware */
-static void iommu_set_device_table(struct amd_iommu *iommu)
-{
-       u64 entry;
-
-       BUG_ON(iommu->mmio_base == NULL);
-
-       entry = iommu_virt_to_phys(amd_iommu_dev_table);
-       entry |= (dev_table_size >> 12) - 1;
-       memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
-                       &entry, sizeof(entry));
-}
-
-/* Generic functions to enable/disable certain features of the IOMMU. */
-static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
-{
-       u64 ctrl;
-
-       ctrl = readq(iommu->mmio_base +  MMIO_CONTROL_OFFSET);
-       ctrl |= (1ULL << bit);
-       writeq(ctrl, iommu->mmio_base +  MMIO_CONTROL_OFFSET);
-}
-
-static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
-{
-       u64 ctrl;
-
-       ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
-       ctrl &= ~(1ULL << bit);
-       writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
-}
-
-static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
-{
-       u64 ctrl;
-
-       ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
-       ctrl &= ~CTRL_INV_TO_MASK;
-       ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
-       writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
-}
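
iommu_feature_enable(), iommu_feature_disable() and iommu_set_inv_tlb_timeout() are all read-modify-write updates of the same 64-bit control register. A trivial user-space sketch of that pattern, with a plain variable standing in for the MMIO register and made-up bit numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t ctrl;                   /* stands in for the MMIO control register */

static void feature_enable(int bit)  { ctrl |=  (1ULL << bit); }
static void feature_disable(int bit) { ctrl &= ~(1ULL << bit); }

int main(void)
{
        feature_enable(0);              /* e.g. an enable bit */
        feature_enable(12);
        feature_disable(0);
        printf("ctrl=0x%llx\n", (unsigned long long)ctrl);   /* prints ctrl=0x1000 */
        return 0;
}
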
-
-/* Function to enable the hardware */
-static void iommu_enable(struct amd_iommu *iommu)
-{
-       iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
-}
-
-static void iommu_disable(struct amd_iommu *iommu)
-{
-       if (!iommu->mmio_base)
-               return;
-
-       /* Disable command buffer */
-       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
-
-       /* Disable event logging and event interrupts */
-       iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
-       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
-
-       /* Disable IOMMU GA_LOG */
-       iommu_feature_disable(iommu, CONTROL_GALOG_EN);
-       iommu_feature_disable(iommu, CONTROL_GAINT_EN);
-
-       /* Disable IOMMU hardware itself */
-       iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
-}
-
-/*
- * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
- * the system has one.
- */
-static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
-{
-       if (!request_mem_region(address, end, "amd_iommu")) {
-               pr_err("Can not reserve memory region %llx-%llx for mmio\n",
-                       address, end);
-               pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
-               return NULL;
-       }
-
-       return (u8 __iomem *)ioremap(address, end);
-}
-
-static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
-{
-       if (iommu->mmio_base)
-               iounmap(iommu->mmio_base);
-       release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
-}
-
-static inline u32 get_ivhd_header_size(struct ivhd_header *h)
-{
-       u32 size = 0;
-
-       switch (h->type) {
-       case 0x10:
-               size = 24;
-               break;
-       case 0x11:
-       case 0x40:
-               size = 40;
-               break;
-       }
-       return size;
-}
-
-/****************************************************************************
- *
- * The functions below belong to the first pass of AMD IOMMU ACPI table
- * parsing. In this pass we try to find out the highest device id this
- * code has to handle. Based on this information the size of the shared data
- * structures is determined later.
- *
- ****************************************************************************/
-
-/*
- * This function calculates the length of a given IVHD entry
- */
-static inline int ivhd_entry_length(u8 *ivhd)
-{
-       u32 type = ((struct ivhd_entry *)ivhd)->type;
-
-       if (type < 0x80) {
-               return 0x04 << (*ivhd >> 6);
-       } else if (type == IVHD_DEV_ACPI_HID) {
-               /* For ACPI_HID, offset 21 is uid len */
-               return *((u8 *)ivhd + 21) + 22;
-       }
-       return 0;
-}
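
For entry types below 0x80 the IVHD entry length is encoded in the top two bits of the type byte, yielding 4, 8, 16 or 32 bytes, while variable-length ACPI_HID entries carry their own UID length at offset 21. A worked sketch of the fixed-length rule, using the IVHD_DEV_* type values defined earlier in this file:

#include <stdio.h>

static int ivhd_fixed_len(unsigned char type)
{
        return 0x04 << (type >> 6);
}

int main(void)
{
        printf("%d %d %d\n",
               ivhd_fixed_len(0x02),    /* IVHD_DEV_SELECT  -> 4 bytes */
               ivhd_fixed_len(0x42),    /* IVHD_DEV_ALIAS   -> 8 bytes */
               ivhd_fixed_len(0x48));   /* IVHD_DEV_SPECIAL -> 8 bytes */
        return 0;
}
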
-
-/*
- * After reading the highest device id from the IOMMU PCI capability header,
- * this function checks whether a higher device id is defined in the ACPI table.
- */
-static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
-{
-       u8 *p = (void *)h, *end = (void *)h;
-       struct ivhd_entry *dev;
-
-       u32 ivhd_size = get_ivhd_header_size(h);
-
-       if (!ivhd_size) {
-               pr_err("Unsupported IVHD type %#x\n", h->type);
-               return -EINVAL;
-       }
-
-       p += ivhd_size;
-       end += h->length;
-
-       while (p < end) {
-               dev = (struct ivhd_entry *)p;
-               switch (dev->type) {
-               case IVHD_DEV_ALL:
-                       /* Use maximum BDF value for DEV_ALL */
-                       update_last_devid(0xffff);
-                       break;
-               case IVHD_DEV_SELECT:
-               case IVHD_DEV_RANGE_END:
-               case IVHD_DEV_ALIAS:
-               case IVHD_DEV_EXT_SELECT:
-                       /* all the above subfield types refer to device ids */
-                       update_last_devid(dev->devid);
-                       break;
-               default:
-                       break;
-               }
-               p += ivhd_entry_length(p);
-       }
-
-       WARN_ON(p != end);
-
-       return 0;
-}
-
-static int __init check_ivrs_checksum(struct acpi_table_header *table)
-{
-       int i;
-       u8 checksum = 0, *p = (u8 *)table;
-
-       for (i = 0; i < table->length; ++i)
-               checksum += p[i];
-       if (checksum != 0) {
-               /* ACPI table corrupt */
-               pr_err(FW_BUG "IVRS invalid checksum\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
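
check_ivrs_checksum() applies the standard ACPI rule: every byte of the table, including the checksum byte itself, must sum to zero modulo 256. A small sketch that builds and then verifies such a checksum over a made-up four-byte table:

#include <stdio.h>

static unsigned char acpi_checksum(const unsigned char *p, int len)
{
        unsigned char sum = 0;

        while (len--)
                sum += *p++;
        return sum;             /* 0 means the table is intact */
}

int main(void)
{
        unsigned char table[4] = { 0x10, 0x20, 0x30, 0 };

        table[3] = (unsigned char)(0 - (0x10 + 0x20 + 0x30));  /* 0xa0 */
        printf("%u\n", acpi_checksum(table, 4));                /* prints 0 */
        return 0;
}
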
-
-/*
- * Iterate over all IVHD entries in the ACPI table and find the highest device
- * id which we need to handle. This is the first of three functions which parse
- * the ACPI table. So we check the checksum here.
- */
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
-{
-       u8 *p = (u8 *)table, *end = (u8 *)table;
-       struct ivhd_header *h;
-
-       p += IVRS_HEADER_LENGTH;
-
-       end += table->length;
-       while (p < end) {
-               h = (struct ivhd_header *)p;
-               if (h->type == amd_iommu_target_ivhd_type) {
-                       int ret = find_last_devid_from_ivhd(h);
-
-                       if (ret)
-                               return ret;
-               }
-               p += h->length;
-       }
-       WARN_ON(p != end);
-
-       return 0;
-}
-
-/****************************************************************************
- *
- * The following functions belong to the code path which parses the ACPI table
- * the second time. In this ACPI parsing iteration we allocate IOMMU specific
- * data structures, initialize the device/alias/rlookup table and also
- * basically initialize the hardware.
- *
- ****************************************************************************/
-
-/*
- * Allocates the command buffer. This buffer is per AMD IOMMU. We can
- * write commands to that buffer later and the IOMMU will execute them
- * asynchronously
- */
-static int __init alloc_command_buffer(struct amd_iommu *iommu)
-{
-       iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                 get_order(CMD_BUFFER_SIZE));
-
-       return iommu->cmd_buf ? 0 : -ENOMEM;
-}
-
-/*
- * This function resets the command buffer if the IOMMU stopped fetching
- * commands from it.
- */
-void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
-{
-       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
-
-       writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
-       writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-       iommu->cmd_buf_head = 0;
-       iommu->cmd_buf_tail = 0;
-
-       iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
-}
-
-/*
- * This function writes the command buffer address to the hardware and
- * enables it.
- */
-static void iommu_enable_command_buffer(struct amd_iommu *iommu)
-{
-       u64 entry;
-
-       BUG_ON(iommu->cmd_buf == NULL);
-
-       entry = iommu_virt_to_phys(iommu->cmd_buf);
-       entry |= MMIO_CMD_SIZE_512;
-
-       memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
-                   &entry, sizeof(entry));
-
-       amd_iommu_reset_cmd_buffer(iommu);
-}
-
-/*
- * This function disables the command buffer
- */
-static void iommu_disable_command_buffer(struct amd_iommu *iommu)
-{
-       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
-}
-
-static void __init free_command_buffer(struct amd_iommu *iommu)
-{
-       free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
-}
-
-/* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_event_buffer(struct amd_iommu *iommu)
-{
-       iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                 get_order(EVT_BUFFER_SIZE));
-
-       return iommu->evt_buf ? 0 : -ENOMEM;
-}
-
-static void iommu_enable_event_buffer(struct amd_iommu *iommu)
-{
-       u64 entry;
-
-       BUG_ON(iommu->evt_buf == NULL);
-
-       entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
-
-       memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
-                   &entry, sizeof(entry));
-
-       /* set head and tail to zero manually */
-       writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-       writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
-
-       iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
-}
-
-/*
- * This function disables the event log buffer
- */
-static void iommu_disable_event_buffer(struct amd_iommu *iommu)
-{
-       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
-}
-
-static void __init free_event_buffer(struct amd_iommu *iommu)
-{
-       free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
-}
-
-/* allocates the memory where the IOMMU will log peripheral page requests to */
-static int __init alloc_ppr_log(struct amd_iommu *iommu)
-{
-       iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                 get_order(PPR_LOG_SIZE));
-
-       return iommu->ppr_log ? 0 : -ENOMEM;
-}
-
-static void iommu_enable_ppr_log(struct amd_iommu *iommu)
-{
-       u64 entry;
-
-       if (iommu->ppr_log == NULL)
-               return;
-
-       entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
-
-       memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
-                   &entry, sizeof(entry));
-
-       /* set head and tail to zero manually */
-       writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-       writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
-       iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
-       iommu_feature_enable(iommu, CONTROL_PPR_EN);
-}
-
-static void __init free_ppr_log(struct amd_iommu *iommu)
-{
-       if (iommu->ppr_log == NULL)
-               return;
-
-       free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
-}
-
-static void free_ga_log(struct amd_iommu *iommu)
-{
-#ifdef CONFIG_IRQ_REMAP
-       if (iommu->ga_log)
-               free_pages((unsigned long)iommu->ga_log,
-                           get_order(GA_LOG_SIZE));
-       if (iommu->ga_log_tail)
-               free_pages((unsigned long)iommu->ga_log_tail,
-                           get_order(8));
-#endif
-}
-
-static int iommu_ga_log_enable(struct amd_iommu *iommu)
-{
-#ifdef CONFIG_IRQ_REMAP
-       u32 status, i;
-
-       if (!iommu->ga_log)
-               return -EINVAL;
-
-       status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-       /* Check if already running */
-       if (status & (MMIO_STATUS_GALOG_RUN_MASK))
-               return 0;
-
-       iommu_feature_enable(iommu, CONTROL_GAINT_EN);
-       iommu_feature_enable(iommu, CONTROL_GALOG_EN);
-
-       for (i = 0; i < LOOP_TIMEOUT; ++i) {
-               status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-               if (status & (MMIO_STATUS_GALOG_RUN_MASK))
-                       break;
-       }
-
-       if (i >= LOOP_TIMEOUT)
-               return -EINVAL;
-#endif /* CONFIG_IRQ_REMAP */
-       return 0;
-}
-
-#ifdef CONFIG_IRQ_REMAP
-static int iommu_init_ga_log(struct amd_iommu *iommu)
-{
-       u64 entry;
-
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
-               return 0;
-
-       iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                       get_order(GA_LOG_SIZE));
-       if (!iommu->ga_log)
-               goto err_out;
-
-       iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                       get_order(8));
-       if (!iommu->ga_log_tail)
-               goto err_out;
-
-       entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
-       memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
-                   &entry, sizeof(entry));
-       entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
-                (BIT_ULL(52)-1)) & ~7ULL;
-       memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
-                   &entry, sizeof(entry));
-       writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
-       writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
-
-       return 0;
-err_out:
-       free_ga_log(iommu);
-       return -EINVAL;
-}
-#endif /* CONFIG_IRQ_REMAP */
-
-static int iommu_init_ga(struct amd_iommu *iommu)
-{
-       int ret = 0;
-
-#ifdef CONFIG_IRQ_REMAP
-       /* Note: We have already checked GASup from IVRS table.
-        *       Now, we need to make sure that GAMSup is set.
-        */
-       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-           !iommu_feature(iommu, FEATURE_GAM_VAPIC))
-               amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
-       ret = iommu_init_ga_log(iommu);
-#endif /* CONFIG_IRQ_REMAP */
-
-       return ret;
-}
-
-static void iommu_enable_xt(struct amd_iommu *iommu)
-{
-#ifdef CONFIG_IRQ_REMAP
-       /*
-        * XT mode (32-bit APIC destination ID) requires
-        * GA mode (128-bit IRTE support) as a prerequisite.
-        */
-       if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
-           amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
-               iommu_feature_enable(iommu, CONTROL_XT_EN);
-#endif /* CONFIG_IRQ_REMAP */
-}
-
-static void iommu_enable_gt(struct amd_iommu *iommu)
-{
-       if (!iommu_feature(iommu, FEATURE_GT))
-               return;
-
-       iommu_feature_enable(iommu, CONTROL_GT_EN);
-}
-
-/* sets a specific bit in the device table entry. */
-static void set_dev_entry_bit(u16 devid, u8 bit)
-{
-       int i = (bit >> 6) & 0x03;
-       int _bit = bit & 0x3f;
-
-       amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
-}
-
-static int get_dev_entry_bit(u16 devid, u8 bit)
-{
-       int i = (bit >> 6) & 0x03;
-       int _bit = bit & 0x3f;
-
-       return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
-}
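
set_dev_entry_bit() and get_dev_entry_bit() address a device table entry as four 64-bit words, so flag bit b lives in word (b >> 6) at bit position (b & 0x3f). A short sketch of that addressing with an illustrative bit position:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t dte[4] = { 0 };
        int bit = 96;                           /* example flag position */

        dte[bit >> 6] |= 1ULL << (bit & 0x3f);  /* word 1, bit 32 */
        printf("word=%d mask=0x%llx\n", bit >> 6,
               (unsigned long long)dte[bit >> 6]);
        return 0;
}
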
-
-
-static bool copy_device_table(void)
-{
-       u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
-       struct dev_table_entry *old_devtb = NULL;
-       u32 lo, hi, devid, old_devtb_size;
-       phys_addr_t old_devtb_phys;
-       struct amd_iommu *iommu;
-       u16 dom_id, dte_v, irq_v;
-       gfp_t gfp_flag;
-       u64 tmp;
-
-       if (!amd_iommu_pre_enabled)
-               return false;
-
-       pr_warn("Translation is already enabled - trying to copy translation structures\n");
-       for_each_iommu(iommu) {
-               /* All IOMMUs should use the same device table with the same size */
-               lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
-               hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
-               entry = (((u64) hi) << 32) + lo;
-               if (last_entry && last_entry != entry) {
-                       pr_err("IOMMU:%d should use the same dev table as others!\n",
-                               iommu->index);
-                       return false;
-               }
-               last_entry = entry;
-
-               old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
-               if (old_devtb_size != dev_table_size) {
-                       pr_err("The device table size of IOMMU:%d is not expected!\n",
-                               iommu->index);
-                       return false;
-               }
-       }
-
-       /*
-        * When SME is enabled in the first kernel, the entry includes the
-        * memory encryption mask (sme_me_mask), so we must remove the mask
-        * to obtain the true physical address in the kdump kernel.
-        */
-       old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
-
-       if (old_devtb_phys >= 0x100000000ULL) {
-               pr_err("The address of old device table is above 4G, not trustworthy!\n");
-               return false;
-       }
-       old_devtb = (sme_active() && is_kdump_kernel())
-                   ? (__force void *)ioremap_encrypted(old_devtb_phys,
-                                                       dev_table_size)
-                   : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
-
-       if (!old_devtb)
-               return false;
-
-       gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-       old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-                               get_order(dev_table_size));
-       if (old_dev_tbl_cpy == NULL) {
-               pr_err("Failed to allocate memory for copying old device table!\n");
-               return false;
-       }
-
-       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
-               old_dev_tbl_cpy[devid] = old_devtb[devid];
-               dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
-               dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
-
-               if (dte_v && dom_id) {
-                       old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
-                       old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
-                       __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
-                       /* If gcr3 table existed, mask it out */
-                       if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
-                               tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
-                               tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
-                               old_dev_tbl_cpy[devid].data[1] &= ~tmp;
-                               tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
-                               tmp |= DTE_FLAG_GV;
-                               old_dev_tbl_cpy[devid].data[0] &= ~tmp;
-                       }
-               }
-
-               irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
-               int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
-               int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
-               if (irq_v && (int_ctl || int_tab_len)) {
-                       if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
-                           (int_tab_len != DTE_IRQ_TABLE_LEN)) {
-                               pr_err("Wrong old irq remapping flag for devid: %#x\n", devid);
-                               return false;
-                       }
-
-                       old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
-               }
-       }
-       memunmap(old_devtb);
-
-       return true;
-}
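As a rough illustration (not part of this patch) of the register decoding in the loop above: the value read from MMIO_DEV_TABLE_OFFSET carries the old device table base address in its page-aligned bits and the table size, in 4K pages minus one, in the bits below the page boundary. The sample value is hypothetical and the sme_me_mask handling is left out.

    /* Illustrative only; assumes 4K pages and no memory encryption. */
    unsigned long long entry      = 0x0000000012340001ULL;           /* hypothetical register value */
    unsigned long long devtb_size = ((entry & 0xfffULL) + 1) << 12;  /* (1 + 1) * 4K = 8K           */
    unsigned long long devtb_phys = entry & ~0xfffULL;               /* 0x12340000                  */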
-
-void amd_iommu_apply_erratum_63(u16 devid)
-{
-       int sysmgt;
-
-       sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
-                (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
-
-       if (sysmgt == 0x01)
-               set_dev_entry_bit(devid, DEV_ENTRY_IW);
-}
-
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
-{
-       amd_iommu_rlookup_table[devid] = iommu;
-}
-
-/*
- * This function takes the device specific flags read from the ACPI
- * table and sets up the device table entry with that information
- */
-static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
-                                          u16 devid, u32 flags, u32 ext_flags)
-{
-       if (flags & ACPI_DEVFLAG_INITPASS)
-               set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
-       if (flags & ACPI_DEVFLAG_EXTINT)
-               set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
-       if (flags & ACPI_DEVFLAG_NMI)
-               set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
-       if (flags & ACPI_DEVFLAG_SYSMGT1)
-               set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
-       if (flags & ACPI_DEVFLAG_SYSMGT2)
-               set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
-       if (flags & ACPI_DEVFLAG_LINT0)
-               set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
-       if (flags & ACPI_DEVFLAG_LINT1)
-               set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
-
-       amd_iommu_apply_erratum_63(devid);
-
-       set_iommu_for_device(iommu, devid);
-}
-
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
-{
-       struct devid_map *entry;
-       struct list_head *list;
-
-       if (type == IVHD_SPECIAL_IOAPIC)
-               list = &ioapic_map;
-       else if (type == IVHD_SPECIAL_HPET)
-               list = &hpet_map;
-       else
-               return -EINVAL;
-
-       list_for_each_entry(entry, list, list) {
-               if (!(entry->id == id && entry->cmd_line))
-                       continue;
-
-               pr_info("Command-line override present for %s id %d - ignoring\n",
-                       type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
-
-               *devid = entry->devid;
-
-               return 0;
-       }
-
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry)
-               return -ENOMEM;
-
-       entry->id       = id;
-       entry->devid    = *devid;
-       entry->cmd_line = cmd_line;
-
-       list_add_tail(&entry->list, list);
-
-       return 0;
-}
-
-static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
-                                     bool cmd_line)
-{
-       struct acpihid_map_entry *entry;
-       struct list_head *list = &acpihid_map;
-
-       list_for_each_entry(entry, list, list) {
-               if (strcmp(entry->hid, hid) ||
-                   (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
-                   !entry->cmd_line)
-                       continue;
-
-               pr_info("Command-line override for hid:%s uid:%s\n",
-                       hid, uid);
-               *devid = entry->devid;
-               return 0;
-       }
-
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry)
-               return -ENOMEM;
-
-       memcpy(entry->uid, uid, strlen(uid));
-       memcpy(entry->hid, hid, strlen(hid));
-       entry->devid = *devid;
-       entry->cmd_line = cmd_line;
-       entry->root_devid = (entry->devid & (~0x7));
-
-       pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
-               entry->cmd_line ? "cmd" : "ivrs",
-               entry->hid, entry->uid, entry->root_devid);
-
-       list_add_tail(&entry->list, list);
-       return 0;
-}
-
-static int __init add_early_maps(void)
-{
-       int i, ret;
-
-       for (i = 0; i < early_ioapic_map_size; ++i) {
-               ret = add_special_device(IVHD_SPECIAL_IOAPIC,
-                                        early_ioapic_map[i].id,
-                                        &early_ioapic_map[i].devid,
-                                        early_ioapic_map[i].cmd_line);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < early_hpet_map_size; ++i) {
-               ret = add_special_device(IVHD_SPECIAL_HPET,
-                                        early_hpet_map[i].id,
-                                        &early_hpet_map[i].devid,
-                                        early_hpet_map[i].cmd_line);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < early_acpihid_map_size; ++i) {
-               ret = add_acpi_hid_device(early_acpihid_map[i].hid,
-                                         early_acpihid_map[i].uid,
-                                         &early_acpihid_map[i].devid,
-                                         early_acpihid_map[i].cmd_line);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-/*
- * Reads the device exclusion range from ACPI and initializes the IOMMU with
- * it
- */
-static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
-{
-       if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
-               return;
-
-       /*
-        * Treat per-device exclusion ranges as r/w unity-mapped regions
-        * since some buggy BIOSes might overwrite the exclusion range
-        * (exclusion_start and exclusion_length members) when multiple
-        * exclusion ranges (IVMD entries) are defined in the ACPI table.
-        */
-       m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
-}
-
-/*
- * Takes a pointer to an AMD IOMMU entry in the ACPI table and
- * initializes the hardware and our data structures with it.
- */
-static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
-                                       struct ivhd_header *h)
-{
-       u8 *p = (u8 *)h;
-       u8 *end = p, flags = 0;
-       u16 devid = 0, devid_start = 0, devid_to = 0;
-       u32 dev_i, ext_flags = 0;
-       bool alias = false;
-       struct ivhd_entry *e;
-       u32 ivhd_size;
-       int ret;
-
-
-       ret = add_early_maps();
-       if (ret)
-               return ret;
-
-       amd_iommu_apply_ivrs_quirks();
-
-       /*
-        * First save the recommended feature enable bits from ACPI
-        */
-       iommu->acpi_flags = h->flags;
-
-       /*
-        * Done. Now parse the device entries
-        */
-       ivhd_size = get_ivhd_header_size(h);
-       if (!ivhd_size) {
-               pr_err("Unsupported IVHD type %#x\n", h->type);
-               return -EINVAL;
-       }
-
-       p += ivhd_size;
-
-       end += h->length;
-
-
-       while (p < end) {
-               e = (struct ivhd_entry *)p;
-               switch (e->type) {
-               case IVHD_DEV_ALL:
-
-                       DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
-
-                       for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
-                               set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
-                       break;
-               case IVHD_DEV_SELECT:
-
-                       DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
-                                   "flags: %02x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid),
-                                   e->flags);
-
-                       devid = e->devid;
-                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-                       break;
-               case IVHD_DEV_SELECT_RANGE_START:
-
-                       DUMP_printk("  DEV_SELECT_RANGE_START\t "
-                                   "devid: %02x:%02x.%x flags: %02x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid),
-                                   e->flags);
-
-                       devid_start = e->devid;
-                       flags = e->flags;
-                       ext_flags = 0;
-                       alias = false;
-                       break;
-               case IVHD_DEV_ALIAS:
-
-                       DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
-                                   "flags: %02x devid_to: %02x:%02x.%x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid),
-                                   e->flags,
-                                   PCI_BUS_NUM(e->ext >> 8),
-                                   PCI_SLOT(e->ext >> 8),
-                                   PCI_FUNC(e->ext >> 8));
-
-                       devid = e->devid;
-                       devid_to = e->ext >> 8;
-                       set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
-                       set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
-                       amd_iommu_alias_table[devid] = devid_to;
-                       break;
-               case IVHD_DEV_ALIAS_RANGE:
-
-                       DUMP_printk("  DEV_ALIAS_RANGE\t\t "
-                                   "devid: %02x:%02x.%x flags: %02x "
-                                   "devid_to: %02x:%02x.%x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid),
-                                   e->flags,
-                                   PCI_BUS_NUM(e->ext >> 8),
-                                   PCI_SLOT(e->ext >> 8),
-                                   PCI_FUNC(e->ext >> 8));
-
-                       devid_start = e->devid;
-                       flags = e->flags;
-                       devid_to = e->ext >> 8;
-                       ext_flags = 0;
-                       alias = true;
-                       break;
-               case IVHD_DEV_EXT_SELECT:
-
-                       DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
-                                   "flags: %02x ext: %08x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid),
-                                   e->flags, e->ext);
-
-                       devid = e->devid;
-                       set_dev_entry_from_acpi(iommu, devid, e->flags,
-                                               e->ext);
-                       break;
-               case IVHD_DEV_EXT_SELECT_RANGE:
-
-                       DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
-                                   "%02x:%02x.%x flags: %02x ext: %08x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid),
-                                   e->flags, e->ext);
-
-                       devid_start = e->devid;
-                       flags = e->flags;
-                       ext_flags = e->ext;
-                       alias = false;
-                       break;
-               case IVHD_DEV_RANGE_END:
-
-                       DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
-                                   PCI_BUS_NUM(e->devid),
-                                   PCI_SLOT(e->devid),
-                                   PCI_FUNC(e->devid));
-
-                       devid = e->devid;
-                       for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
-                               if (alias) {
-                                       amd_iommu_alias_table[dev_i] = devid_to;
-                                       set_dev_entry_from_acpi(iommu,
-                                               devid_to, flags, ext_flags);
-                               }
-                               set_dev_entry_from_acpi(iommu, dev_i,
-                                                       flags, ext_flags);
-                       }
-                       break;
-               case IVHD_DEV_SPECIAL: {
-                       u8 handle, type;
-                       const char *var;
-                       u16 devid;
-                       int ret;
-
-                       handle = e->ext & 0xff;
-                       devid  = (e->ext >>  8) & 0xffff;
-                       type   = (e->ext >> 24) & 0xff;
-
-                       if (type == IVHD_SPECIAL_IOAPIC)
-                               var = "IOAPIC";
-                       else if (type == IVHD_SPECIAL_HPET)
-                               var = "HPET";
-                       else
-                               var = "UNKNOWN";
-
-                       DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
-                                   var, (int)handle,
-                                   PCI_BUS_NUM(devid),
-                                   PCI_SLOT(devid),
-                                   PCI_FUNC(devid));
-
-                       ret = add_special_device(type, handle, &devid, false);
-                       if (ret)
-                               return ret;
-
-                       /*
-                        * add_special_device might update the devid in case a
-                        * command-line override is present. So call
-                        * set_dev_entry_from_acpi after add_special_device.
-                        */
-                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-
-                       break;
-               }
-               case IVHD_DEV_ACPI_HID: {
-                       u16 devid;
-                       u8 hid[ACPIHID_HID_LEN];
-                       u8 uid[ACPIHID_UID_LEN];
-                       int ret;
-
-                       if (h->type != 0x40) {
-                               pr_err(FW_BUG "Invalid IVHD device type %#x\n",
-                                      e->type);
-                               break;
-                       }
-
-                       memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
-                       hid[ACPIHID_HID_LEN - 1] = '\0';
-
-                       if (!(*hid)) {
-                               pr_err(FW_BUG "Invalid HID.\n");
-                               break;
-                       }
-
-                       uid[0] = '\0';
-                       switch (e->uidf) {
-                       case UID_NOT_PRESENT:
-
-                               if (e->uidl != 0)
-                                       pr_warn(FW_BUG "Invalid UID length.\n");
-
-                               break;
-                       case UID_IS_INTEGER:
-
-                               sprintf(uid, "%d", e->uid);
-
-                               break;
-                       case UID_IS_CHARACTER:
-
-                               memcpy(uid, &e->uid, e->uidl);
-                               uid[e->uidl] = '\0';
-
-                               break;
-                       default:
-                               break;
-                       }
-
-                       devid = e->devid;
-                       DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
-                                   hid, uid,
-                                   PCI_BUS_NUM(devid),
-                                   PCI_SLOT(devid),
-                                   PCI_FUNC(devid));
-
-                       flags = e->flags;
-
-                       ret = add_acpi_hid_device(hid, uid, &devid, false);
-                       if (ret)
-                               return ret;
-
-                       /*
-                        * add_acpi_hid_device might update the devid in case a
-                        * command-line override is present. So call
-                        * set_dev_entry_from_acpi after add_acpi_hid_device.
-                        */
-                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-
-                       break;
-               }
-               default:
-                       break;
-               }
-
-               p += ivhd_entry_length(p);
-       }
-
-       return 0;
-}
-
-static void __init free_iommu_one(struct amd_iommu *iommu)
-{
-       free_command_buffer(iommu);
-       free_event_buffer(iommu);
-       free_ppr_log(iommu);
-       free_ga_log(iommu);
-       iommu_unmap_mmio_space(iommu);
-}
-
-static void __init free_iommu_all(void)
-{
-       struct amd_iommu *iommu, *next;
-
-       for_each_iommu_safe(iommu, next) {
-               list_del(&iommu->list);
-               free_iommu_one(iommu);
-               kfree(iommu);
-       }
-}
-
-/*
- * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
- * Workaround:
- *     BIOS should disable L2B miscellaneous clock gating by setting
- *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
- */
-static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
-{
-       u32 value;
-
-       if ((boot_cpu_data.x86 != 0x15) ||
-           (boot_cpu_data.x86_model < 0x10) ||
-           (boot_cpu_data.x86_model > 0x1f))
-               return;
-
-       pci_write_config_dword(iommu->dev, 0xf0, 0x90);
-       pci_read_config_dword(iommu->dev, 0xf4, &value);
-
-       if (value & BIT(2))
-               return;
-
-       /* Select NB indirect register 0x90 and enable writing */
-       pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
-
-       pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
-       pci_info(iommu->dev, "Applying erratum 746 workaround\n");
-
-       /* Clear the enable writing bit */
-       pci_write_config_dword(iommu->dev, 0xf0, 0x90);
-}
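The workaround above uses the northbridge's indirect register pair at PCI config offsets 0xf0/0xf4: the register index (with bit 8 set to enable writing) goes into 0xf0, and the value is then read or written through 0xf4. A hedged sketch of that access pattern follows; the helper name is hypothetical and iommu->dev plays the role of pdev in the code above.

    /* Illustrative only: NB indirect register write, mirroring the sequence above. */
    static void nb_indirect_write(struct pci_dev *pdev, u32 reg, u32 val)
    {
            pci_write_config_dword(pdev, 0xf0, reg | (1 << 8)); /* select register, enable writing */
            pci_write_config_dword(pdev, 0xf4, val);            /* write the value                 */
            pci_write_config_dword(pdev, 0xf0, reg);            /* clear the write-enable bit      */
    }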
-
-/*
- * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
- * Workaround:
- *     BIOS should enable ATS write permission check by setting
- *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
- */
-static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
-{
-       u32 value;
-
-       if ((boot_cpu_data.x86 != 0x15) ||
-           (boot_cpu_data.x86_model < 0x30) ||
-           (boot_cpu_data.x86_model > 0x3f))
-               return;
-
-       /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
-       value = iommu_read_l2(iommu, 0x47);
-
-       if (value & BIT(0))
-               return;
-
-       /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
-       iommu_write_l2(iommu, 0x47, value | BIT(0));
-
-       pci_info(iommu->dev, "Applying ATS write check workaround\n");
-}
-
-/*
- * This function glues the initialization of one IOMMU together. It also
- * allocates the command buffer and programs the hardware. It does NOT
- * enable the IOMMU; that is done afterwards.
- */
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
-{
-       int ret;
-
-       raw_spin_lock_init(&iommu->lock);
-
-       /* Add IOMMU to internal data structures */
-       list_add_tail(&iommu->list, &amd_iommu_list);
-       iommu->index = amd_iommus_present++;
-
-       if (unlikely(iommu->index >= MAX_IOMMUS)) {
-               WARN(1, "System has more IOMMUs than supported by this driver\n");
-               return -ENOSYS;
-       }
-
-       /* Index is fine - add IOMMU to the array */
-       amd_iommus[iommu->index] = iommu;
-
-       /*
-        * Copy data from ACPI table entry to the iommu struct
-        */
-       iommu->devid   = h->devid;
-       iommu->cap_ptr = h->cap_ptr;
-       iommu->pci_seg = h->pci_seg;
-       iommu->mmio_phys = h->mmio_phys;
-
-       switch (h->type) {
-       case 0x10:
-               /* Check if IVHD EFR contains proper max banks/counters */
-               if ((h->efr_attr != 0) &&
-                   ((h->efr_attr & (0xF << 13)) != 0) &&
-                   ((h->efr_attr & (0x3F << 17)) != 0))
-                       iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
-               else
-                       iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
-               if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
-                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-               break;
-       case 0x11:
-       case 0x40:
-               if (h->efr_reg & (1 << 9))
-                       iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
-               else
-                       iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
-               if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
-                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-               /*
-                * Note: Since iommu_update_intcapxt() leverages
-                * the IOMMU MMIO access to MSI capability block registers
-                * for MSI address lo/hi/data, we need to check both
-                * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
-                */
-               if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
-                   (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
-                       amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
-                                               iommu->mmio_phys_end);
-       if (!iommu->mmio_base)
-               return -ENOMEM;
-
-       if (alloc_command_buffer(iommu))
-               return -ENOMEM;
-
-       if (alloc_event_buffer(iommu))
-               return -ENOMEM;
-
-       iommu->int_enabled = false;
-
-       init_translation_status(iommu);
-       if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
-               iommu_disable(iommu);
-               clear_translation_pre_enabled(iommu);
-               pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
-                       iommu->index);
-       }
-       if (amd_iommu_pre_enabled)
-               amd_iommu_pre_enabled = translation_pre_enabled(iommu);
-
-       ret = init_iommu_from_acpi(iommu, h);
-       if (ret)
-               return ret;
-
-       ret = amd_iommu_create_irq_domain(iommu);
-       if (ret)
-               return ret;
-
-       /*
-        * Make sure IOMMU is not considered to translate itself. The IVRS
-        * table tells us so, but this is a lie!
-        */
-       amd_iommu_rlookup_table[iommu->devid] = NULL;
-
-       return 0;
-}
-
-/**
- * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
- * @ivrs: Pointer to the IVRS header
- *
- * This function searches through all IVHD entries for the first IOMMU
- * device and returns the highest supported IVHD type found.
- */
-static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
-{
-       u8 *base = (u8 *)ivrs;
-       struct ivhd_header *ivhd = (struct ivhd_header *)
-                                       (base + IVRS_HEADER_LENGTH);
-       u8 last_type = ivhd->type;
-       u16 devid = ivhd->devid;
-
-       while (((u8 *)ivhd - base < ivrs->length) &&
-              (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
-               u8 *p = (u8 *) ivhd;
-
-               if (ivhd->devid == devid)
-                       last_type = ivhd->type;
-               ivhd = (struct ivhd_header *)(p + ivhd->length);
-       }
-
-       return last_type;
-}
-
-/*
- * Iterates over all IOMMU entries in the ACPI table, allocates the
- * IOMMU structure and initializes it with init_iommu_one()
- */
-static int __init init_iommu_all(struct acpi_table_header *table)
-{
-       u8 *p = (u8 *)table, *end = (u8 *)table;
-       struct ivhd_header *h;
-       struct amd_iommu *iommu;
-       int ret;
-
-       end += table->length;
-       p += IVRS_HEADER_LENGTH;
-
-       while (p < end) {
-               h = (struct ivhd_header *)p;
-               if (*p == amd_iommu_target_ivhd_type) {
-
-                       DUMP_printk("device: %02x:%02x.%01x cap: %04x "
-                                   "seg: %d flags: %01x info %04x\n",
-                                   PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
-                                   PCI_FUNC(h->devid), h->cap_ptr,
-                                   h->pci_seg, h->flags, h->info);
-                       DUMP_printk("       mmio-addr: %016llx\n",
-                                   h->mmio_phys);
-
-                       iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-                       if (iommu == NULL)
-                               return -ENOMEM;
-
-                       ret = init_iommu_one(iommu, h);
-                       if (ret)
-                               return ret;
-               }
-               p += h->length;
-
-       }
-       WARN_ON(p != end);
-
-       return 0;
-}
-
-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
-                               u8 fxn, u64 *value, bool is_write);
-
-static void init_iommu_perf_ctr(struct amd_iommu *iommu)
-{
-       struct pci_dev *pdev = iommu->dev;
-       u64 val = 0xabcd, val2 = 0, save_reg = 0;
-
-       if (!iommu_feature(iommu, FEATURE_PC))
-               return;
-
-       amd_iommu_pc_present = true;
-
-       /* save the value to restore, if writable */
-       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
-               goto pc_false;
-
-       /* Check if the performance counters can be written to */
-       if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
-           (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
-           (val != val2))
-               goto pc_false;
-
-       /* restore */
-       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
-               goto pc_false;
-
-       pci_info(pdev, "IOMMU performance counters supported\n");
-
-       val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
-       iommu->max_banks = (u8) ((val >> 12) & 0x3f);
-       iommu->max_counters = (u8) ((val >> 7) & 0xf);
-
-       return;
-
-pc_false:
-       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
-       amd_iommu_pc_present = false;
-       return;
-}
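For illustration (not part of this patch), the read at the end of init_iommu_perf_ctr() above pulls the number of counter banks and counters per bank out of the MMIO counter configuration register. The register value below is hypothetical.

    /* Illustrative only: bank/counter field extraction for a hypothetical value. */
    unsigned long long val       = 0x0000000000012345ULL;
    unsigned char max_banks      = (val >> 12) & 0x3f;   /* 0x12 */
    unsigned char max_counters   = (val >> 7)  & 0xf;    /* 0x6  */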
-
-static ssize_t amd_iommu_show_cap(struct device *dev,
-                                 struct device_attribute *attr,
-                                 char *buf)
-{
-       struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-       return sprintf(buf, "%x\n", iommu->cap);
-}
-static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
-
-static ssize_t amd_iommu_show_features(struct device *dev,
-                                      struct device_attribute *attr,
-                                      char *buf)
-{
-       struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-       return sprintf(buf, "%llx\n", iommu->features);
-}
-static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
-
-static struct attribute *amd_iommu_attrs[] = {
-       &dev_attr_cap.attr,
-       &dev_attr_features.attr,
-       NULL,
-};
-
-static struct attribute_group amd_iommu_group = {
-       .name = "amd-iommu",
-       .attrs = amd_iommu_attrs,
-};
-
-static const struct attribute_group *amd_iommu_groups[] = {
-       &amd_iommu_group,
-       NULL,
-};
-
-static int __init iommu_init_pci(struct amd_iommu *iommu)
-{
-       int cap_ptr = iommu->cap_ptr;
-       int ret;
-
-       iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
-                                                iommu->devid & 0xff);
-       if (!iommu->dev)
-               return -ENODEV;
-
-       /* Prevent binding other PCI device drivers to IOMMU devices */
-       iommu->dev->match_driver = false;
-
-       pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
-                             &iommu->cap);
-
-       if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
-               amd_iommu_iotlb_sup = false;
-
-       /* read extended feature bits */
-       iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
-
-       if (iommu_feature(iommu, FEATURE_GT)) {
-               int glxval;
-               u32 max_pasid;
-               u64 pasmax;
-
-               pasmax = iommu->features & FEATURE_PASID_MASK;
-               pasmax >>= FEATURE_PASID_SHIFT;
-               max_pasid  = (1 << (pasmax + 1)) - 1;
-
-               amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
-
-               BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
-
-               glxval   = iommu->features & FEATURE_GLXVAL_MASK;
-               glxval >>= FEATURE_GLXVAL_SHIFT;
-
-               if (amd_iommu_max_glx_val == -1)
-                       amd_iommu_max_glx_val = glxval;
-               else
-                       amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
-       }
-
-       if (iommu_feature(iommu, FEATURE_GT) &&
-           iommu_feature(iommu, FEATURE_PPR)) {
-               iommu->is_iommu_v2   = true;
-               amd_iommu_v2_present = true;
-       }
-
-       if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
-               return -ENOMEM;
-
-       ret = iommu_init_ga(iommu);
-       if (ret)
-               return ret;
-
-       if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
-               amd_iommu_np_cache = true;
-
-       init_iommu_perf_ctr(iommu);
-
-       if (is_rd890_iommu(iommu->dev)) {
-               int i, j;
-
-               iommu->root_pdev =
-                       pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
-                                                   PCI_DEVFN(0, 0));
-
-               /*
-                * Some rd890 systems may not be fully reconfigured by the
-                * BIOS, so it's necessary for us to store this information so
-                * it can be reprogrammed on resume
-                */
-               pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
-                               &iommu->stored_addr_lo);
-               pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
-                               &iommu->stored_addr_hi);
-
-               /* Low bit locks writes to configuration space */
-               iommu->stored_addr_lo &= ~1;
-
-               for (i = 0; i < 6; i++)
-                       for (j = 0; j < 0x12; j++)
-                               iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
-               for (i = 0; i < 0x83; i++)
-                       iommu->stored_l2[i] = iommu_read_l2(iommu, i);
-       }
-
-       amd_iommu_erratum_746_workaround(iommu);
-       amd_iommu_ats_write_check_workaround(iommu);
-
-       iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
-                              amd_iommu_groups, "ivhd%d", iommu->index);
-       iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
-       iommu_device_register(&iommu->iommu);
-
-       return pci_enable_device(iommu->dev);
-}
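A small worked example (not part of this patch) of the PASID width arithmetic in iommu_init_pci() above: the PASmax field taken from the extended feature register, plus one, gives the number of supported PASID bits. The field value used here is hypothetical.

    /* Illustrative only: PASmax field of 15 -> 16 PASID bits. */
    unsigned int pasmax    = 15;                        /* hypothetical (features & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT */
    unsigned int max_pasid = (1u << (pasmax + 1)) - 1;  /* 0xffff */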
-
-static void print_iommu_info(void)
-{
-       static const char * const feat_str[] = {
-               "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
-               "IA", "GA", "HE", "PC"
-       };
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu) {
-               struct pci_dev *pdev = iommu->dev;
-               int i;
-
-               pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
-
-               if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-                       pci_info(pdev, "Extended features (%#llx):\n",
-                                iommu->features);
-                       for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
-                               if (iommu_feature(iommu, (1ULL << i)))
-                                       pr_cont(" %s", feat_str[i]);
-                       }
-
-                       if (iommu->features & FEATURE_GAM_VAPIC)
-                               pr_cont(" GA_vAPIC");
-
-                       pr_cont("\n");
-               }
-       }
-       if (irq_remapping_enabled) {
-               pr_info("Interrupt remapping enabled\n");
-               if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
-                       pr_info("Virtual APIC enabled\n");
-               if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
-                       pr_info("X2APIC enabled\n");
-       }
-}
-
-static int __init amd_iommu_init_pci(void)
-{
-       struct amd_iommu *iommu;
-       int ret = 0;
-
-       for_each_iommu(iommu) {
-               ret = iommu_init_pci(iommu);
-               if (ret)
-                       break;
-       }
-
-       /*
-        * Order is important here to make sure any unity map requirements are
-        * fulfilled. The unity mappings are created and written to the device
-        * table during the amd_iommu_init_api() call.
-        *
-        * After that we call init_device_table_dma() to make sure any
-        * uninitialized DTE will block DMA, and in the end we flush the caches
-        * of all IOMMUs to make sure the changes to the device table are
-        * active.
-        */
-       ret = amd_iommu_init_api();
-
-       init_device_table_dma();
-
-       for_each_iommu(iommu)
-               iommu_flush_all_caches(iommu);
-
-       if (!ret)
-               print_iommu_info();
-
-       return ret;
-}
-
-/****************************************************************************
- *
- * The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. It's a bit challenging because there could be multiple
- * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
- * pci_dev.
- *
- ****************************************************************************/
-
-static int iommu_setup_msi(struct amd_iommu *iommu)
-{
-       int r;
-
-       r = pci_enable_msi(iommu->dev);
-       if (r)
-               return r;
-
-       r = request_threaded_irq(iommu->dev->irq,
-                                amd_iommu_int_handler,
-                                amd_iommu_int_thread,
-                                0, "AMD-Vi",
-                                iommu);
-
-       if (r) {
-               pci_disable_msi(iommu->dev);
-               return r;
-       }
-
-       iommu->int_enabled = true;
-
-       return 0;
-}
-
-#define XT_INT_DEST_MODE(x)    (((x) & 0x1ULL) << 2)
-#define XT_INT_DEST_LO(x)      (((x) & 0xFFFFFFULL) << 8)
-#define XT_INT_VEC(x)          (((x) & 0xFFULL) << 32)
-#define XT_INT_DEST_HI(x)      ((((x) >> 24) & 0xFFULL) << 56)
-
-/**
- * Set up the IntCapXT registers with interrupt routing information
- * based on the PCI MSI capability block registers, accessed via
- * MMIO MSI address low/hi and MSI data registers.
- */
-static void iommu_update_intcapxt(struct amd_iommu *iommu)
-{
-       u64 val;
-       u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
-       u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
-       u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
-       bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
-       u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
-
-       if (x2apic_enabled())
-               dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
-
-       val = XT_INT_VEC(data & 0xFF) |
-             XT_INT_DEST_MODE(dm) |
-             XT_INT_DEST_LO(dest) |
-             XT_INT_DEST_HI(dest);
-
-       /**
-        * The current IOMMU implementation uses the same IRQ for all
-        * three IOMMU interrupts.
-        */
-       writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
-       writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
-       writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
-}
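As a rough sketch (not part of this patch) of how the XT_INT_* macros above compose a 64-bit IntCapXT value: the vector lands in bits 39:32, the low 24 bits of the destination ID in bits 31:8, the upper 8 destination bits in bits 63:56, and the destination mode in bit 2. The vector and destination ID below are hypothetical.

    /* Illustrative only: vector 0x31, x2APIC destination ID 0x01234567, physical mode. */
    u64 val = XT_INT_VEC(0x31) |            /* 0x31     -> bits 39:32 */
              XT_INT_DEST_MODE(0) |         /* physical -> bit 2 = 0  */
              XT_INT_DEST_LO(0x01234567) |  /* 0x234567 -> bits 31:8  */
              XT_INT_DEST_HI(0x01234567);   /* 0x01     -> bits 63:56 */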
-
-static void _irq_notifier_notify(struct irq_affinity_notify *notify,
-                                const cpumask_t *mask)
-{
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu) {
-               if (iommu->dev->irq == notify->irq) {
-                       iommu_update_intcapxt(iommu);
-                       break;
-               }
-       }
-}
-
-static void _irq_notifier_release(struct kref *ref)
-{
-}
-
-static int iommu_init_intcapxt(struct amd_iommu *iommu)
-{
-       int ret;
-       struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
-
-       /**
-        * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
-        * which can be inferred from amd_iommu_xt_mode.
-        */
-       if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
-               return 0;
-
-       /**
-        * Also, we need to set up a notifier to update the IntCapXT
-        * registers whenever the irq affinity is changed from user-space.
-        */
-       notify->irq = iommu->dev->irq;
-       notify->notify = _irq_notifier_notify;
-       notify->release = _irq_notifier_release;
-       ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
-       if (ret) {
-               pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
-                      iommu->devid, iommu->dev->irq);
-               return ret;
-       }
-
-       iommu_update_intcapxt(iommu);
-       iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
-       return ret;
-}
-
-static int iommu_init_msi(struct amd_iommu *iommu)
-{
-       int ret;
-
-       if (iommu->int_enabled)
-               goto enable_faults;
-
-       if (iommu->dev->msi_cap)
-               ret = iommu_setup_msi(iommu);
-       else
-               ret = -ENODEV;
-
-       if (ret)
-               return ret;
-
-enable_faults:
-       ret = iommu_init_intcapxt(iommu);
-       if (ret)
-               return ret;
-
-       iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
-
-       if (iommu->ppr_log != NULL)
-               iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-
-       iommu_ga_log_enable(iommu);
-
-       return 0;
-}
-
-/****************************************************************************
- *
- * The next functions belong to the last pass of parsing the ACPI
- * table. In this pass the memory mapping requirements are
- * gathered (like exclusion and unity mapping ranges).
- *
- ****************************************************************************/
-
-static void __init free_unity_maps(void)
-{
-       struct unity_map_entry *entry, *next;
-
-       list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-}
-
-/* called when we find an exclusion range definition in ACPI */
-static int __init init_exclusion_range(struct ivmd_header *m)
-{
-       int i;
-
-       switch (m->type) {
-       case ACPI_IVMD_TYPE:
-               set_device_exclusion_range(m->devid, m);
-               break;
-       case ACPI_IVMD_TYPE_ALL:
-               for (i = 0; i <= amd_iommu_last_bdf; ++i)
-                       set_device_exclusion_range(i, m);
-               break;
-       case ACPI_IVMD_TYPE_RANGE:
-               for (i = m->devid; i <= m->aux; ++i)
-                       set_device_exclusion_range(i, m);
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-/* called for unity map ACPI definition */
-static int __init init_unity_map_range(struct ivmd_header *m)
-{
-       struct unity_map_entry *e = NULL;
-       char *s;
-
-       e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (e == NULL)
-               return -ENOMEM;
-
-       if (m->flags & IVMD_FLAG_EXCL_RANGE)
-               init_exclusion_range(m);
-
-       switch (m->type) {
-       default:
-               kfree(e);
-               return 0;
-       case ACPI_IVMD_TYPE:
-               s = "IVMD_TYPE\t\t\t";
-               e->devid_start = e->devid_end = m->devid;
-               break;
-       case ACPI_IVMD_TYPE_ALL:
-               s = "IVMD_TYPE_ALL\t\t";
-               e->devid_start = 0;
-               e->devid_end = amd_iommu_last_bdf;
-               break;
-       case ACPI_IVMD_TYPE_RANGE:
-               s = "IVMD_TYPE_RANGE\t\t";
-               e->devid_start = m->devid;
-               e->devid_end = m->aux;
-               break;
-       }
-       e->address_start = PAGE_ALIGN(m->range_start);
-       e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
-       e->prot = m->flags >> 1;
-
-       DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
-                   " range_start: %016llx range_end: %016llx flags: %x\n", s,
-                   PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
-                   PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
-                   PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
-                   e->address_start, e->address_end, m->flags);
-
-       list_add_tail(&e->list, &amd_iommu_unity_map);
-
-       return 0;
-}
-
-/* iterates over all memory definitions we find in the ACPI table */
-static int __init init_memory_definitions(struct acpi_table_header *table)
-{
-       u8 *p = (u8 *)table, *end = (u8 *)table;
-       struct ivmd_header *m;
-
-       end += table->length;
-       p += IVRS_HEADER_LENGTH;
-
-       while (p < end) {
-               m = (struct ivmd_header *)p;
-               if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
-                       init_unity_map_range(m);
-
-               p += m->length;
-       }
-
-       return 0;
-}
-
-/*
- * Init the device table to not allow DMA access for devices
- */
-static void init_device_table_dma(void)
-{
-       u32 devid;
-
-       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
-               set_dev_entry_bit(devid, DEV_ENTRY_VALID);
-               set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
-       }
-}
-
-static void __init uninit_device_table_dma(void)
-{
-       u32 devid;
-
-       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
-               amd_iommu_dev_table[devid].data[0] = 0ULL;
-               amd_iommu_dev_table[devid].data[1] = 0ULL;
-       }
-}
-
-static void init_device_table(void)
-{
-       u32 devid;
-
-       if (!amd_iommu_irq_remap)
-               return;
-
-       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
-               set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
-}
-
-static void iommu_init_flags(struct amd_iommu *iommu)
-{
-       iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-       iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-       iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-       iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-       /*
-        * make IOMMU memory accesses cache coherent
-        */
-       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
-
-       /* Set IOTLB invalidation timeout to 1s */
-       iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
-}
-
-static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
-{
-       int i, j;
-       u32 ioc_feature_control;
-       struct pci_dev *pdev = iommu->root_pdev;
-
-       /* RD890 BIOSes may not have completely reconfigured the iommu */
-       if (!is_rd890_iommu(iommu->dev) || !pdev)
-               return;
-
-       /*
-        * First, we need to ensure that the iommu is enabled. This is
-        * controlled by a register in the northbridge
-        */
-
-       /* Select Northbridge indirect register 0x75 and enable writing */
-       pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
-       pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
-
-       /* Enable the iommu */
-       if (!(ioc_feature_control & 0x1))
-               pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
-
-       /* Restore the iommu BAR */
-       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
-                              iommu->stored_addr_lo);
-       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
-                              iommu->stored_addr_hi);
-
-       /* Restore the l1 indirect regs for each of the 6 l1s */
-       for (i = 0; i < 6; i++)
-               for (j = 0; j < 0x12; j++)
-                       iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
-
-       /* Restore the l2 indirect regs */
-       for (i = 0; i < 0x83; i++)
-               iommu_write_l2(iommu, i, iommu->stored_l2[i]);
-
-       /* Lock PCI setup registers */
-       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
-                              iommu->stored_addr_lo | 1);
-}
-
-static void iommu_enable_ga(struct amd_iommu *iommu)
-{
-#ifdef CONFIG_IRQ_REMAP
-       switch (amd_iommu_guest_ir) {
-       case AMD_IOMMU_GUEST_IR_VAPIC:
-               iommu_feature_enable(iommu, CONTROL_GAM_EN);
-               /* Fall through */
-       case AMD_IOMMU_GUEST_IR_LEGACY_GA:
-               iommu_feature_enable(iommu, CONTROL_GA_EN);
-               iommu->irte_ops = &irte_128_ops;
-               break;
-       default:
-               iommu->irte_ops = &irte_32_ops;
-               break;
-       }
-#endif
-}
-
-static void early_enable_iommu(struct amd_iommu *iommu)
-{
-       iommu_disable(iommu);
-       iommu_init_flags(iommu);
-       iommu_set_device_table(iommu);
-       iommu_enable_command_buffer(iommu);
-       iommu_enable_event_buffer(iommu);
-       iommu_set_exclusion_range(iommu);
-       iommu_enable_ga(iommu);
-       iommu_enable_xt(iommu);
-       iommu_enable(iommu);
-       iommu_flush_all_caches(iommu);
-}
-
-/*
- * This function finally enables all IOMMUs found in the system after
- * they have been initialized.
- *
- * If this is a kdump kernel and all IOMMUs are pre-enabled, it tries to
- * copy the old content of the device table entries. If that is not the
- * case, or the copy fails, it just continues as a normal kernel would.
- */
-static void early_enable_iommus(void)
-{
-       struct amd_iommu *iommu;
-
-
-       if (!copy_device_table()) {
-               /*
-                * If we get here because copying the device table from the old
-                * kernel (with all IOMMUs enabled) failed, print an error message
-                * and free the allocated old_dev_tbl_cpy.
-                */
-               if (amd_iommu_pre_enabled)
-                       pr_err("Failed to copy DEV table from previous kernel.\n");
-               if (old_dev_tbl_cpy != NULL)
-                       free_pages((unsigned long)old_dev_tbl_cpy,
-                                       get_order(dev_table_size));
-
-               for_each_iommu(iommu) {
-                       clear_translation_pre_enabled(iommu);
-                       early_enable_iommu(iommu);
-               }
-       } else {
-               pr_info("Copied DEV table from previous kernel.\n");
-               free_pages((unsigned long)amd_iommu_dev_table,
-                               get_order(dev_table_size));
-               amd_iommu_dev_table = old_dev_tbl_cpy;
-               for_each_iommu(iommu) {
-                       iommu_disable_command_buffer(iommu);
-                       iommu_disable_event_buffer(iommu);
-                       iommu_enable_command_buffer(iommu);
-                       iommu_enable_event_buffer(iommu);
-                       iommu_enable_ga(iommu);
-                       iommu_enable_xt(iommu);
-                       iommu_set_device_table(iommu);
-                       iommu_flush_all_caches(iommu);
-               }
-       }
-
-#ifdef CONFIG_IRQ_REMAP
-       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
-               amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
-#endif
-}
-
-static void enable_iommus_v2(void)
-{
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu) {
-               iommu_enable_ppr_log(iommu);
-               iommu_enable_gt(iommu);
-       }
-}
-
-static void enable_iommus(void)
-{
-       early_enable_iommus();
-
-       enable_iommus_v2();
-}
-
-static void disable_iommus(void)
-{
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu)
-               iommu_disable(iommu);
-
-#ifdef CONFIG_IRQ_REMAP
-       if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
-               amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
-#endif
-}
-
-/*
- * Suspend/Resume support
- * disable suspend until real resume implemented
- */
-
-static void amd_iommu_resume(void)
-{
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu)
-               iommu_apply_resume_quirks(iommu);
-
-       /* re-load the hardware */
-       enable_iommus();
-
-       amd_iommu_enable_interrupts();
-}
-
-static int amd_iommu_suspend(void)
-{
-       /* disable IOMMUs to go out of the way for BIOS */
-       disable_iommus();
-
-       return 0;
-}
-
-static struct syscore_ops amd_iommu_syscore_ops = {
-       .suspend = amd_iommu_suspend,
-       .resume = amd_iommu_resume,
-};
-
-static void __init free_iommu_resources(void)
-{
-       kmemleak_free(irq_lookup_table);
-       free_pages((unsigned long)irq_lookup_table,
-                  get_order(rlookup_table_size));
-       irq_lookup_table = NULL;
-
-       kmem_cache_destroy(amd_iommu_irq_cache);
-       amd_iommu_irq_cache = NULL;
-
-       free_pages((unsigned long)amd_iommu_rlookup_table,
-                  get_order(rlookup_table_size));
-       amd_iommu_rlookup_table = NULL;
-
-       free_pages((unsigned long)amd_iommu_alias_table,
-                  get_order(alias_table_size));
-       amd_iommu_alias_table = NULL;
-
-       free_pages((unsigned long)amd_iommu_dev_table,
-                  get_order(dev_table_size));
-       amd_iommu_dev_table = NULL;
-
-       free_iommu_all();
-}
-
-/* SB IOAPIC is always on this device in AMD systems */
-#define IOAPIC_SB_DEVID                ((0x00 << 8) | PCI_DEVFN(0x14, 0))
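For reference (not part of this patch), the devid packed into IOAPIC_SB_DEVID decomposes with the same PCI_BUS_NUM/PCI_SLOT/PCI_FUNC helpers the driver uses elsewhere in this file:

    /* Illustrative only: IOAPIC_SB_DEVID = (0x00 << 8) | PCI_DEVFN(0x14, 0) = 0xa0. */
    u16 devid = IOAPIC_SB_DEVID;
    u8  bus   = PCI_BUS_NUM(devid);   /* 0x00 */
    u8  slot  = PCI_SLOT(devid);      /* 0x14 */
    u8  func  = PCI_FUNC(devid);      /* 0x0  */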
-
-static bool __init check_ioapic_information(void)
-{
-       const char *fw_bug = FW_BUG;
-       bool ret, has_sb_ioapic;
-       int idx;
-
-       has_sb_ioapic = false;
-       ret           = false;
-
-       /*
-        * If we have map overrides on the kernel command line the
-        * messages in this function might not describe firmware bugs
-        * anymore - so be careful
-        */
-       if (cmdline_maps)
-               fw_bug = "";
-
-       for (idx = 0; idx < nr_ioapics; idx++) {
-               int devid, id = mpc_ioapic_id(idx);
-
-               devid = get_ioapic_devid(id);
-               if (devid < 0) {
-                       pr_err("%s: IOAPIC[%d] not in IVRS table\n",
-                               fw_bug, id);
-                       ret = false;
-               } else if (devid == IOAPIC_SB_DEVID) {
-                       has_sb_ioapic = true;
-                       ret           = true;
-               }
-       }
-
-       if (!has_sb_ioapic) {
-               /*
-                * We expect the SB IOAPIC to be listed in the IVRS
-                * table. The system timer is connected to the SB IOAPIC
-                * and if we don't have it in the list the system will
-                * panic at boot time.  This situation usually happens
-                * when the BIOS is buggy and provides us the wrong
-                * device id for the IOAPIC in the system.
-                */
-               pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
-       }
-
-       if (!ret)
-               pr_err("Disabling interrupt remapping\n");
-
-       return ret;
-}
-
-static void __init free_dma_resources(void)
-{
-       free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-                  get_order(MAX_DOMAIN_ID/8));
-       amd_iommu_pd_alloc_bitmap = NULL;
-
-       free_unity_maps();
-}
-
-/*
- * This is the hardware init function for AMD IOMMU in the system.
- * This function is called either from amd_iommu_init or from the interrupt
- * remapping setup code.
- *
- * This function basically parses the ACPI table for AMD IOMMU (IVRS)
- * four times:
- *
- *     1 pass) Discover the most comprehensive IVHD type to use.
- *
- *     2 pass) Find the highest PCI device id the driver has to handle.
- *             Based on this information, the sizes of the data
- *             structures that need to be allocated are determined.
- *
- *     3 pass) Initialize the data structures just allocated with the
- *             information in the ACPI table about available AMD IOMMUs
- *             in the system. It also maps the PCI devices in the
- *             system to specific IOMMUs
- *
- *     4 pass) After the basic data structures are allocated and
- *             initialized we update them with information about memory
- *             remapping requirements parsed out of the ACPI table in
- *             this last pass.
- *
- * After everything is set up the IOMMUs are enabled and the necessary
- * hotplug and suspend notifiers are registered.
- */
-static int __init early_amd_iommu_init(void)
-{
-       struct acpi_table_header *ivrs_base;
-       acpi_status status;
-       int i, remap_cache_sz, ret = 0;
-       u32 pci_id;
-
-       if (!amd_iommu_detected)
-               return -ENODEV;
-
-       status = acpi_get_table("IVRS", 0, &ivrs_base);
-       if (status == AE_NOT_FOUND)
-               return -ENODEV;
-       else if (ACPI_FAILURE(status)) {
-               const char *err = acpi_format_exception(status);
-               pr_err("IVRS table error: %s\n", err);
-               return -EINVAL;
-       }
-
-       /*
-        * Validate checksum here so we don't need to do it when
-        * we actually parse the table
-        */
-       ret = check_ivrs_checksum(ivrs_base);
-       if (ret)
-               goto out;
-
-       amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
-       DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
-
-       /*
-        * First parse the ACPI tables to find the largest Bus/Dev/Func
-        * we need to handle. Based on this information, the shared data
-        * structures for the IOMMUs in the system will be allocated.
-        */
-       ret = find_last_devid_acpi(ivrs_base);
-       if (ret)
-               goto out;
-
-       dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
-       alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
-       rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
-
-       /* Device table - directly used by all IOMMUs */
-       ret = -ENOMEM;
-       amd_iommu_dev_table = (void *)__get_free_pages(
-                                     GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-                                     get_order(dev_table_size));
-       if (amd_iommu_dev_table == NULL)
-               goto out;
-
-       /*
-        * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
-        * IOMMU sees for that device
-        */
-       amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
-                       get_order(alias_table_size));
-       if (amd_iommu_alias_table == NULL)
-               goto out;
-
-       /* IOMMU rlookup table - find the IOMMU for a specific device */
-       amd_iommu_rlookup_table = (void *)__get_free_pages(
-                       GFP_KERNEL | __GFP_ZERO,
-                       get_order(rlookup_table_size));
-       if (amd_iommu_rlookup_table == NULL)
-               goto out;
-
-       amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
-                                           GFP_KERNEL | __GFP_ZERO,
-                                           get_order(MAX_DOMAIN_ID/8));
-       if (amd_iommu_pd_alloc_bitmap == NULL)
-               goto out;
-
-       /*
-        * let all alias entries point to themselves
-        */
-       for (i = 0; i <= amd_iommu_last_bdf; ++i)
-               amd_iommu_alias_table[i] = i;
-
-       /*
-        * never allocate domain 0 because it's used as the non-allocated and
-        * error value placeholder
-        */
-       __set_bit(0, amd_iommu_pd_alloc_bitmap);
-
-       /*
-        * now that the data structures are allocated and basically
-        * initialized, start the real ACPI table scan
-        */
-       ret = init_iommu_all(ivrs_base);
-       if (ret)
-               goto out;
-
-       /* Disable IOMMU if there's Stoney Ridge graphics */
-       for (i = 0; i < 32; i++) {
-               pci_id = read_pci_config(0, i, 0, 0);
-               if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
-                       pr_info("Disable IOMMU on Stoney Ridge\n");
-                       amd_iommu_disabled = true;
-                       break;
-               }
-       }
-
-       /* Disable any previously enabled IOMMUs */
-       if (!is_kdump_kernel() || amd_iommu_disabled)
-               disable_iommus();
-
-       if (amd_iommu_irq_remap)
-               amd_iommu_irq_remap = check_ioapic_information();
-
-       if (amd_iommu_irq_remap) {
-               /*
-                * Interrupt remapping enabled, create kmem_cache for the
-                * remapping tables.
-                */
-               ret = -ENOMEM;
-               if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-                       remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
-               else
-                       remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
-               amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
-                                                       remap_cache_sz,
-                                                       IRQ_TABLE_ALIGNMENT,
-                                                       0, NULL);
-               if (!amd_iommu_irq_cache)
-                       goto out;
-
-               irq_lookup_table = (void *)__get_free_pages(
-                               GFP_KERNEL | __GFP_ZERO,
-                               get_order(rlookup_table_size));
-               kmemleak_alloc(irq_lookup_table, rlookup_table_size,
-                              1, GFP_KERNEL);
-               if (!irq_lookup_table)
-                       goto out;
-       }
-
-       ret = init_memory_definitions(ivrs_base);
-       if (ret)
-               goto out;
-
-       /* init the device table */
-       init_device_table();
-
-out:
-       /* Don't leak any ACPI memory */
-       acpi_put_table(ivrs_base);
-       ivrs_base = NULL;
-
-       return ret;
-}
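One detail in the function above worth making concrete is the Stoney Ridge check: read_pci_config(0, i, 0, 0) returns PCI config dword 0, which packs the 16-bit vendor ID in the low half and the 16-bit device ID in the high half; 0x1002/0x98e4 are the values tested in the code above. A minimal stand-alone sketch of just that test, using a made-up sample dword:

#include <stdint.h>
#include <stdio.h>

/* Same predicate as the loop above: AMD/ATI vendor 0x1002 in bits 0-15,
 * Stoney Ridge graphics device 0x98e4 in bits 16-31 of config dword 0.
 */
static int is_stoney_ridge(uint32_t pci_id)
{
	return (pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4;
}

int main(void)
{
	uint32_t sample = (0x98e4u << 16) | 0x1002u;	/* hypothetical config dword */

	printf("Stoney Ridge: %s\n", is_stoney_ridge(sample) ? "yes" : "no");
	return 0;
}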
-
-static int amd_iommu_enable_interrupts(void)
-{
-       struct amd_iommu *iommu;
-       int ret = 0;
-
-       for_each_iommu(iommu) {
-               ret = iommu_init_msi(iommu);
-               if (ret)
-                       goto out;
-       }
-
-out:
-       return ret;
-}
-
-static bool detect_ivrs(void)
-{
-       struct acpi_table_header *ivrs_base;
-       acpi_status status;
-
-       status = acpi_get_table("IVRS", 0, &ivrs_base);
-       if (status == AE_NOT_FOUND)
-               return false;
-       else if (ACPI_FAILURE(status)) {
-               const char *err = acpi_format_exception(status);
-               pr_err("IVRS table error: %s\n", err);
-               return false;
-       }
-
-       acpi_put_table(ivrs_base);
-
-       /* Make sure ACS will be enabled during PCI probe */
-       pci_request_acs();
-
-       return true;
-}
-
-/****************************************************************************
- *
- * AMD IOMMU Initialization State Machine
- *
- ****************************************************************************/
-
-static int __init state_next(void)
-{
-       int ret = 0;
-
-       switch (init_state) {
-       case IOMMU_START_STATE:
-               if (!detect_ivrs()) {
-                       init_state      = IOMMU_NOT_FOUND;
-                       ret             = -ENODEV;
-               } else {
-                       init_state      = IOMMU_IVRS_DETECTED;
-               }
-               break;
-       case IOMMU_IVRS_DETECTED:
-               ret = early_amd_iommu_init();
-               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
-               if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
-                       pr_info("AMD IOMMU disabled\n");
-                       init_state = IOMMU_CMDLINE_DISABLED;
-                       ret = -EINVAL;
-               }
-               break;
-       case IOMMU_ACPI_FINISHED:
-               early_enable_iommus();
-               x86_platform.iommu_shutdown = disable_iommus;
-               init_state = IOMMU_ENABLED;
-               break;
-       case IOMMU_ENABLED:
-               register_syscore_ops(&amd_iommu_syscore_ops);
-               ret = amd_iommu_init_pci();
-               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
-               enable_iommus_v2();
-               break;
-       case IOMMU_PCI_INIT:
-               ret = amd_iommu_enable_interrupts();
-               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
-               break;
-       case IOMMU_INTERRUPTS_EN:
-               ret = amd_iommu_init_dma_ops();
-               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
-               break;
-       case IOMMU_DMA_OPS:
-               init_state = IOMMU_INITIALIZED;
-               break;
-       case IOMMU_INITIALIZED:
-               /* Nothing to do */
-               break;
-       case IOMMU_NOT_FOUND:
-       case IOMMU_INIT_ERROR:
-       case IOMMU_CMDLINE_DISABLED:
-               /* Error states => do nothing */
-               ret = -EINVAL;
-               break;
-       default:
-               /* Unknown state */
-               BUG();
-       }
-
-       if (ret) {
-               free_dma_resources();
-               if (!irq_remapping_enabled) {
-                       disable_iommus();
-                       free_iommu_resources();
-               } else {
-                       struct amd_iommu *iommu;
-
-                       uninit_device_table_dma();
-                       for_each_iommu(iommu)
-                               iommu_flush_all_caches(iommu);
-               }
-       }
-       return ret;
-}
-
-static int __init iommu_go_to_state(enum iommu_init_state state)
-{
-       int ret = -EINVAL;
-
-       while (init_state != state) {
-               if (init_state == IOMMU_NOT_FOUND         ||
-                   init_state == IOMMU_INIT_ERROR        ||
-                   init_state == IOMMU_CMDLINE_DISABLED)
-                       break;
-               ret = state_next();
-       }
-
-       return ret;
-}
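The two functions above form a small, resumable state machine: state_next() performs exactly one transition, and iommu_go_to_state() keeps calling it until either the requested state or one of the terminal error states is reached, so the later entry points (amd_iommu_prepare(), amd_iommu_enable(), amd_iommu_init()) each pull initialization forward only as far as they need. A deliberately simplified user-space model of just that control structure follows; the per-state work is elided and the three error states are collapsed into one, so this is a sketch of the pattern, not of the driver itself.

#include <stdio.h>

enum init_state {
	STATE_START,
	STATE_IVRS_DETECTED,
	STATE_ACPI_FINISHED,
	STATE_ENABLED,
	STATE_PCI_INIT,
	STATE_INTERRUPTS_EN,
	STATE_DMA_OPS,
	STATE_INITIALIZED,
	STATE_ERROR,		/* stands in for the three terminal error states */
};

static enum init_state cur = STATE_START;

static int step(void)
{
	if (cur == STATE_ERROR)
		return -1;
	if (cur != STATE_INITIALIZED)
		cur = (enum init_state)(cur + 1);	/* real code does the state's work here,
							   moving to STATE_ERROR on failure */
	return 0;
}

static int go_to_state(enum init_state target)
{
	int ret = -1;

	while (cur != target) {
		if (cur == STATE_ERROR)
			break;
		ret = step();
	}
	return ret;
}

int main(void)
{
	go_to_state(STATE_ACPI_FINISHED);	/* what amd_iommu_prepare() requests */
	printf("state = %d\n", cur);
	go_to_state(STATE_INITIALIZED);		/* what amd_iommu_init() requests */
	printf("state = %d\n", cur);
	return 0;
}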
-
-#ifdef CONFIG_IRQ_REMAP
-int __init amd_iommu_prepare(void)
-{
-       int ret;
-
-       amd_iommu_irq_remap = true;
-
-       ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
-       if (ret)
-               return ret;
-       return amd_iommu_irq_remap ? 0 : -ENODEV;
-}
-
-int __init amd_iommu_enable(void)
-{
-       int ret;
-
-       ret = iommu_go_to_state(IOMMU_ENABLED);
-       if (ret)
-               return ret;
-
-       irq_remapping_enabled = 1;
-       return amd_iommu_xt_mode;
-}
-
-void amd_iommu_disable(void)
-{
-       amd_iommu_suspend();
-}
-
-int amd_iommu_reenable(int mode)
-{
-       amd_iommu_resume();
-
-       return 0;
-}
-
-int __init amd_iommu_enable_faulting(void)
-{
-       /* We enable MSI later when PCI is initialized */
-       return 0;
-}
-#endif
-
-/*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
- */
-static int __init amd_iommu_init(void)
-{
-       struct amd_iommu *iommu;
-       int ret;
-
-       ret = iommu_go_to_state(IOMMU_INITIALIZED);
-#ifdef CONFIG_GART_IOMMU
-       if (ret && list_empty(&amd_iommu_list)) {
-               /*
-                * We failed to initialize the AMD IOMMU - try falling back
-                * to GART if possible.
-                */
-               gart_iommu_init();
-       }
-#endif
-
-       for_each_iommu(iommu)
-               amd_iommu_debugfs_setup(iommu);
-
-       return ret;
-}
-
-static bool amd_iommu_sme_check(void)
-{
-       if (!sme_active() || (boot_cpu_data.x86 != 0x17))
-               return true;
-
-       /* For Fam17h, a specific level of support is required */
-       if (boot_cpu_data.microcode >= 0x08001205)
-               return true;
-
-       if ((boot_cpu_data.microcode >= 0x08001126) &&
-           (boot_cpu_data.microcode <= 0x080011ff))
-               return true;
-
-       pr_notice("IOMMU not currently supported when SME is active\n");
-
-       return false;
-}
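The check above encodes two acceptable microcode windows for family 0x17 parts when SME is active: anything at or above 0x08001205, or the earlier 0x08001126..0x080011ff range. A stand-alone restatement of just that predicate (version numbers copied from the code above; the surrounding sme_active() and CPU-family checks are omitted):

#include <stdint.h>
#include <stdio.h>

static int fam17h_ucode_ok_with_sme(uint32_t ucode)
{
	if (ucode >= 0x08001205)
		return 1;
	if (ucode >= 0x08001126 && ucode <= 0x080011ff)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       fam17h_ucode_ok_with_sme(0x08001126),	/* 1: inside the early window */
	       fam17h_ucode_ok_with_sme(0x08001200),	/* 0: between the two windows */
	       fam17h_ucode_ok_with_sme(0x08001230));	/* 1: at/above 0x08001205 */
	return 0;
}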
-
-/****************************************************************************
- *
- * Early detect code. This code runs at IOMMU detection time in the DMA
- * layer. It just checks whether an IVRS ACPI table is present to detect
- * AMD IOMMUs.
- *
- ****************************************************************************/
-int __init amd_iommu_detect(void)
-{
-       int ret;
-
-       if (no_iommu || (iommu_detected && !gart_iommu_aperture))
-               return -ENODEV;
-
-       if (!amd_iommu_sme_check())
-               return -ENODEV;
-
-       ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
-       if (ret)
-               return ret;
-
-       amd_iommu_detected = true;
-       iommu_detected = 1;
-       x86_init.iommu.iommu_init = amd_iommu_init;
-
-       return 1;
-}
-
-/****************************************************************************
- *
- * Parsing functions for the AMD IOMMU specific kernel command line
- * options.
- *
- ****************************************************************************/
-
-static int __init parse_amd_iommu_dump(char *str)
-{
-       amd_iommu_dump = true;
-
-       return 1;
-}
-
-static int __init parse_amd_iommu_intr(char *str)
-{
-       for (; *str; ++str) {
-               if (strncmp(str, "legacy", 6) == 0) {
-                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-                       break;
-               }
-               if (strncmp(str, "vapic", 5) == 0) {
-                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-                       break;
-               }
-       }
-       return 1;
-}
-
-static int __init parse_amd_iommu_options(char *str)
-{
-       for (; *str; ++str) {
-               if (strncmp(str, "fullflush", 9) == 0)
-                       amd_iommu_unmap_flush = true;
-               if (strncmp(str, "off", 3) == 0)
-                       amd_iommu_disabled = true;
-               if (strncmp(str, "force_isolation", 15) == 0)
-                       amd_iommu_force_isolation = true;
-       }
-
-       return 1;
-}
-
-static int __init parse_ivrs_ioapic(char *str)
-{
-       unsigned int bus, dev, fn;
-       int ret, id, i;
-       u16 devid;
-
-       ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
-       if (ret != 4) {
-               pr_err("Invalid command line: ivrs_ioapic%s\n", str);
-               return 1;
-       }
-
-       if (early_ioapic_map_size == EARLY_MAP_SIZE) {
-               pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
-                       str);
-               return 1;
-       }
-
-       devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
-
-       cmdline_maps                    = true;
-       i                               = early_ioapic_map_size++;
-       early_ioapic_map[i].id          = id;
-       early_ioapic_map[i].devid       = devid;
-       early_ioapic_map[i].cmd_line    = true;
-
-       return 1;
-}
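The devid computed above is the usual PCI BDF encoding used throughout the driver: bus in bits 15-8, device in bits 7-3, function in bits 2-0. A tiny stand-alone illustration, assuming the example option ivrs_ioapic[32]=00:14.0 (the same 00:14.0 mapping also shows up in the quirks file further down):

#include <stdint.h>
#include <stdio.h>

/* Same packing as parse_ivrs_ioapic()/parse_ivrs_hpet() above:
 * 8-bit bus, 5-bit device and 3-bit function into a 16-bit devid.
 */
static uint16_t pack_devid(unsigned int bus, unsigned int dev, unsigned int fn)
{
	return ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
}

int main(void)
{
	/* ivrs_ioapic[32]=00:14.0 would map IOAPIC id 32 to devid 0x00a0 */
	printf("00:14.0 -> devid 0x%04x\n", (unsigned)pack_devid(0x00, 0x14, 0x0));
	return 0;
}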
-
-static int __init parse_ivrs_hpet(char *str)
-{
-       unsigned int bus, dev, fn;
-       int ret, id, i;
-       u16 devid;
-
-       ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
-       if (ret != 4) {
-               pr_err("Invalid command line: ivrs_hpet%s\n", str);
-               return 1;
-       }
-
-       if (early_hpet_map_size == EARLY_MAP_SIZE) {
-               pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
-                       str);
-               return 1;
-       }
-
-       devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
-
-       cmdline_maps                    = true;
-       i                               = early_hpet_map_size++;
-       early_hpet_map[i].id            = id;
-       early_hpet_map[i].devid         = devid;
-       early_hpet_map[i].cmd_line      = true;
-
-       return 1;
-}
-
-static int __init parse_ivrs_acpihid(char *str)
-{
-       u32 bus, dev, fn;
-       char *hid, *uid, *p;
-       char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
-       int ret, i;
-
-       ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
-       if (ret != 4) {
-               pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
-               return 1;
-       }
-
-       p = acpiid;
-       hid = strsep(&p, ":");
-       uid = p;
-
-       if (!hid || !(*hid) || !uid) {
-               pr_err("Invalid command line: hid or uid\n");
-               return 1;
-       }
-
-       i = early_acpihid_map_size++;
-       memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
-       memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
-       early_acpihid_map[i].devid =
-               ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
-       early_acpihid_map[i].cmd_line   = true;
-
-       return 1;
-}
-
-__setup("amd_iommu_dump",      parse_amd_iommu_dump);
-__setup("amd_iommu=",          parse_amd_iommu_options);
-__setup("amd_iommu_intr=",     parse_amd_iommu_intr);
-__setup("ivrs_ioapic",         parse_ivrs_ioapic);
-__setup("ivrs_hpet",           parse_ivrs_hpet);
-__setup("ivrs_acpihid",                parse_ivrs_acpihid);
-
-IOMMU_INIT_FINISH(amd_iommu_detect,
-                 gart_iommu_hole_init,
-                 NULL,
-                 NULL);
-
-bool amd_iommu_v2_supported(void)
-{
-       return amd_iommu_v2_present;
-}
-EXPORT_SYMBOL(amd_iommu_v2_supported);
-
-struct amd_iommu *get_amd_iommu(unsigned int idx)
-{
-       unsigned int i = 0;
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu)
-               if (i++ == idx)
-                       return iommu;
-       return NULL;
-}
-EXPORT_SYMBOL(get_amd_iommu);
-
-/****************************************************************************
- *
- * IOMMU EFR Performance Counter support functionality. This code allows
- * access to the IOMMU PC functionality.
- *
- ****************************************************************************/
-
-u8 amd_iommu_pc_get_max_banks(unsigned int idx)
-{
-       struct amd_iommu *iommu = get_amd_iommu(idx);
-
-       if (iommu)
-               return iommu->max_banks;
-
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
-
-bool amd_iommu_pc_supported(void)
-{
-       return amd_iommu_pc_present;
-}
-EXPORT_SYMBOL(amd_iommu_pc_supported);
-
-u8 amd_iommu_pc_get_max_counters(unsigned int idx)
-{
-       struct amd_iommu *iommu = get_amd_iommu(idx);
-
-       if (iommu)
-               return iommu->max_counters;
-
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-
-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
-                               u8 fxn, u64 *value, bool is_write)
-{
-       u32 offset;
-       u32 max_offset_lim;
-
-       /* Make sure the IOMMU PC resource is available */
-       if (!amd_iommu_pc_present)
-               return -ENODEV;
-
-       /* Check for valid iommu and pc register indexing */
-       if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
-               return -ENODEV;
-
-       offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
-
-       /* Limit the offset to the hw defined mmio region aperture */
-       max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
-                               (iommu->max_counters << 8) | 0x28);
-       if ((offset < MMIO_CNTR_REG_OFFSET) ||
-           (offset > max_offset_lim))
-               return -EINVAL;
-
-       if (is_write) {
-               u64 val = *value & GENMASK_ULL(47, 0);
-
-               writel((u32)val, iommu->mmio_base + offset);
-               writel((val >> 32), iommu->mmio_base + offset + 4);
-       } else {
-               *value = readl(iommu->mmio_base + offset + 4);
-               *value <<= 32;
-               *value |= readl(iommu->mmio_base + offset);
-               *value &= GENMASK_ULL(47, 0);
-       }
-
-       return 0;
-}
-
-int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
-{
-       if (!iommu)
-               return -EINVAL;
-
-       return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
-}
-EXPORT_SYMBOL(amd_iommu_pc_get_reg);
-
-int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
-{
-       if (!iommu)
-               return -EINVAL;
-
-       return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
-}
-EXPORT_SYMBOL(amd_iommu_pc_set_reg);
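To make the register addressing in iommu_pc_get_set_reg() concrete: the bank/counter/fxn triple is folded into a single MMIO offset, and bank 0, counter 0, register 0 lands exactly at MMIO_CNTR_REG_OFFSET (0x40000, defined in amd_iommu_types.h below). A small stand-alone sketch of just that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Offset computation used above: the bank occupies bits 19-12 (with 0x40
 * or'ed in, placing the registers at or above offset 0x40000), the counter
 * occupies bits 11-8, and fxn selects the 8-byte aligned register within
 * the counter (0x00..0x28).
 */
static uint32_t pc_reg_offset(uint8_t bank, uint8_t cntr, uint8_t fxn)
{
	return ((0x40u | bank) << 12) | ((uint32_t)cntr << 8) | fxn;
}

int main(void)
{
	printf("0x%05x\n", (unsigned)pc_reg_offset(0, 0, 0x00));	/* 0x40000 */
	printf("0x%05x\n", (unsigned)pc_reg_offset(1, 2, 0x10));	/* 0x41210 */
	return 0;
}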
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
deleted file mode 100644 (file)
index 5120ce4..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-/*
- * Quirks for AMD IOMMU
- *
- * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
- */
-
-#ifdef CONFIG_DMI
-#include <linux/dmi.h>
-
-#include "amd_iommu.h"
-
-#define IVHD_SPECIAL_IOAPIC            1
-
-struct ivrs_quirk_entry {
-       u8 id;
-       u16 devid;
-};
-
-enum {
-       DELL_INSPIRON_7375 = 0,
-       DELL_LATITUDE_5495,
-       LENOVO_IDEAPAD_330S_15ARR,
-};
-
-static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
-       /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
-       [DELL_INSPIRON_7375] = {
-               { .id = 4, .devid = 0xa0 },
-               { .id = 5, .devid = 0x2 },
-               {}
-       },
-       /* ivrs_ioapic[4]=00:14.0 */
-       [DELL_LATITUDE_5495] = {
-               { .id = 4, .devid = 0xa0 },
-               {}
-       },
-       /* ivrs_ioapic[32]=00:14.0 */
-       [LENOVO_IDEAPAD_330S_15ARR] = {
-               { .id = 32, .devid = 0xa0 },
-               {}
-       },
-       {}
-};
-
-static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
-{
-       const struct ivrs_quirk_entry *i;
-
-       for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
-               add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
-
-       return 0;
-}
-
-static const struct dmi_system_id ivrs_quirks[] __initconst = {
-       {
-               .callback = ivrs_ioapic_quirk_cb,
-               .ident = "Dell Inspiron 7375",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
-               },
-               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
-       },
-       {
-               .callback = ivrs_ioapic_quirk_cb,
-               .ident = "Dell Latitude 5495",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
-               },
-               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
-       },
-       {
-               /*
-                * Acer Aspire A315-41 requires the very same workaround as
-                * Dell Latitude 5495
-                */
-               .callback = ivrs_ioapic_quirk_cb,
-               .ident = "Acer Aspire A315-41",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
-               },
-               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
-       },
-       {
-               .callback = ivrs_ioapic_quirk_cb,
-               .ident = "Lenovo ideapad 330S-15ARR",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
-               },
-               .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
-       },
-       {}
-};
-
-void __init amd_iommu_apply_ivrs_quirks(void)
-{
-       dmi_check_system(ivrs_quirks);
-}
-#endif
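The DMI table above is effectively a built-in version of the ivrs_ioapic= command-line override from the init code: for the listed machines, each { .id, .devid } pair is fed to add_special_device() as if the user had passed the option shown in the table comments. A quick stand-alone check that the raw devids in the table decode to the BDFs quoted in those comments:

#include <stdint.h>
#include <stdio.h>

/* Inverse of the bus/dev/fn packing used by parse_ivrs_ioapic(). */
static void print_bdf(uint16_t devid)
{
	printf("%02x:%02x.%x\n", devid >> 8, (devid >> 3) & 0x1f, devid & 0x7);
}

int main(void)
{
	print_bdf(0xa0);	/* -> 00:14.0, matching the table comments above */
	print_bdf(0x02);	/* -> 00:00.2 */
	return 0;
}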
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
deleted file mode 100644 (file)
index 30a5d41..0000000
+++ /dev/null
@@ -1,907 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- *         Leo Duran <leo.duran@amd.com>
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
-#define _ASM_X86_AMD_IOMMU_TYPES_H
-
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/msi.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/irqreturn.h>
-
-/*
- * Maximum number of IOMMUs supported
- */
-#define MAX_IOMMUS     32
-
-/*
- * some size calculation constants
- */
-#define DEV_TABLE_ENTRY_SIZE           32
-#define ALIAS_TABLE_ENTRY_SIZE         2
-#define RLOOKUP_TABLE_ENTRY_SIZE       (sizeof(void *))
-
-/* Capability offsets used by the driver */
-#define MMIO_CAP_HDR_OFFSET    0x00
-#define MMIO_RANGE_OFFSET      0x0c
-#define MMIO_MISC_OFFSET       0x10
-
-/* Masks, shifts and macros to parse the device range capability */
-#define MMIO_RANGE_LD_MASK     0xff000000
-#define MMIO_RANGE_FD_MASK     0x00ff0000
-#define MMIO_RANGE_BUS_MASK    0x0000ff00
-#define MMIO_RANGE_LD_SHIFT    24
-#define MMIO_RANGE_FD_SHIFT    16
-#define MMIO_RANGE_BUS_SHIFT   8
-#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
-#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
-#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
-#define MMIO_MSI_NUM(x)        ((x) & 0x1f)
-
-/* Flag masks for the AMD IOMMU exclusion range */
-#define MMIO_EXCL_ENABLE_MASK 0x01ULL
-#define MMIO_EXCL_ALLOW_MASK  0x02ULL
-
-/* Used offsets into the MMIO space */
-#define MMIO_DEV_TABLE_OFFSET   0x0000
-#define MMIO_CMD_BUF_OFFSET     0x0008
-#define MMIO_EVT_BUF_OFFSET     0x0010
-#define MMIO_CONTROL_OFFSET     0x0018
-#define MMIO_EXCL_BASE_OFFSET   0x0020
-#define MMIO_EXCL_LIMIT_OFFSET  0x0028
-#define MMIO_EXT_FEATURES      0x0030
-#define MMIO_PPR_LOG_OFFSET    0x0038
-#define MMIO_GA_LOG_BASE_OFFSET        0x00e0
-#define MMIO_GA_LOG_TAIL_OFFSET        0x00e8
-#define MMIO_MSI_ADDR_LO_OFFSET        0x015C
-#define MMIO_MSI_ADDR_HI_OFFSET        0x0160
-#define MMIO_MSI_DATA_OFFSET   0x0164
-#define MMIO_INTCAPXT_EVT_OFFSET       0x0170
-#define MMIO_INTCAPXT_PPR_OFFSET       0x0178
-#define MMIO_INTCAPXT_GALOG_OFFSET     0x0180
-#define MMIO_CMD_HEAD_OFFSET   0x2000
-#define MMIO_CMD_TAIL_OFFSET   0x2008
-#define MMIO_EVT_HEAD_OFFSET   0x2010
-#define MMIO_EVT_TAIL_OFFSET   0x2018
-#define MMIO_STATUS_OFFSET     0x2020
-#define MMIO_PPR_HEAD_OFFSET   0x2030
-#define MMIO_PPR_TAIL_OFFSET   0x2038
-#define MMIO_GA_HEAD_OFFSET    0x2040
-#define MMIO_GA_TAIL_OFFSET    0x2048
-#define MMIO_CNTR_CONF_OFFSET  0x4000
-#define MMIO_CNTR_REG_OFFSET   0x40000
-#define MMIO_REG_END_OFFSET    0x80000
-
-
-
-/* Extended Feature Bits */
-#define FEATURE_PREFETCH       (1ULL<<0)
-#define FEATURE_PPR            (1ULL<<1)
-#define FEATURE_X2APIC         (1ULL<<2)
-#define FEATURE_NX             (1ULL<<3)
-#define FEATURE_GT             (1ULL<<4)
-#define FEATURE_IA             (1ULL<<6)
-#define FEATURE_GA             (1ULL<<7)
-#define FEATURE_HE             (1ULL<<8)
-#define FEATURE_PC             (1ULL<<9)
-#define FEATURE_GAM_VAPIC      (1ULL<<21)
-#define FEATURE_EPHSUP         (1ULL<<50)
-
-#define FEATURE_PASID_SHIFT    32
-#define FEATURE_PASID_MASK     (0x1fULL << FEATURE_PASID_SHIFT)
-
-#define FEATURE_GLXVAL_SHIFT   14
-#define FEATURE_GLXVAL_MASK    (0x03ULL << FEATURE_GLXVAL_SHIFT)
-
-/* Note:
- * The current driver only supports 16-bit PASIDs.
- * Currently, hardware only implements up to 16-bit PASIDs
- * even though the spec says it could have up to 20 bits.
- */
-#define PASID_MASK             0x0000ffff
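The FEATURE_* constants above are mask/shift pairs over the 64-bit Extended Feature Register, and the init code extracts multi-bit fields such as PASmax and GLXval with the usual (efr & MASK) >> SHIFT pattern. A stand-alone illustration on a made-up EFR value (the field layout is taken from the defines above; the value itself is fabricated):

#include <stdint.h>
#include <stdio.h>

#define FEATURE_GT		(1ULL << 4)
#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)
#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

int main(void)
{
	/* Made-up EFR: GT supported, PASmax = 0x0f, GLXval = 1 */
	uint64_t efr = FEATURE_GT | (0x0fULL << FEATURE_PASID_SHIFT) |
		       (1ULL << FEATURE_GLXVAL_SHIFT);

	printf("GT:     %s\n", (efr & FEATURE_GT) ? "yes" : "no");
	printf("PASmax: %llu\n",
	       (unsigned long long)((efr & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT));
	printf("GLXval: %llu\n",
	       (unsigned long long)((efr & FEATURE_GLXVAL_MASK) >> FEATURE_GLXVAL_SHIFT));
	return 0;
}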
-
-/* MMIO status bits */
-#define MMIO_STATUS_EVT_INT_MASK       (1 << 1)
-#define MMIO_STATUS_COM_WAIT_INT_MASK  (1 << 2)
-#define MMIO_STATUS_PPR_INT_MASK       (1 << 6)
-#define MMIO_STATUS_GALOG_RUN_MASK     (1 << 8)
-#define MMIO_STATUS_GALOG_OVERFLOW_MASK        (1 << 9)
-#define MMIO_STATUS_GALOG_INT_MASK     (1 << 10)
-
-/* event logging constants */
-#define EVENT_ENTRY_SIZE       0x10
-#define EVENT_TYPE_SHIFT       28
-#define EVENT_TYPE_MASK                0xf
-#define EVENT_TYPE_ILL_DEV     0x1
-#define EVENT_TYPE_IO_FAULT    0x2
-#define EVENT_TYPE_DEV_TAB_ERR 0x3
-#define EVENT_TYPE_PAGE_TAB_ERR        0x4
-#define EVENT_TYPE_ILL_CMD     0x5
-#define EVENT_TYPE_CMD_HARD_ERR        0x6
-#define EVENT_TYPE_IOTLB_INV_TO        0x7
-#define EVENT_TYPE_INV_DEV_REQ 0x8
-#define EVENT_TYPE_INV_PPR_REQ 0x9
-#define EVENT_DEVID_MASK       0xffff
-#define EVENT_DEVID_SHIFT      0
-#define EVENT_DOMID_MASK_LO    0xffff
-#define EVENT_DOMID_MASK_HI    0xf0000
-#define EVENT_FLAGS_MASK       0xfff
-#define EVENT_FLAGS_SHIFT      0x10
-
-/* feature control bits */
-#define CONTROL_IOMMU_EN        0x00ULL
-#define CONTROL_HT_TUN_EN       0x01ULL
-#define CONTROL_EVT_LOG_EN      0x02ULL
-#define CONTROL_EVT_INT_EN      0x03ULL
-#define CONTROL_COMWAIT_EN      0x04ULL
-#define CONTROL_INV_TIMEOUT    0x05ULL
-#define CONTROL_PASSPW_EN       0x08ULL
-#define CONTROL_RESPASSPW_EN    0x09ULL
-#define CONTROL_COHERENT_EN     0x0aULL
-#define CONTROL_ISOC_EN         0x0bULL
-#define CONTROL_CMDBUF_EN       0x0cULL
-#define CONTROL_PPRLOG_EN       0x0dULL
-#define CONTROL_PPRINT_EN       0x0eULL
-#define CONTROL_PPR_EN          0x0fULL
-#define CONTROL_GT_EN           0x10ULL
-#define CONTROL_GA_EN           0x11ULL
-#define CONTROL_GAM_EN          0x19ULL
-#define CONTROL_GALOG_EN        0x1CULL
-#define CONTROL_GAINT_EN        0x1DULL
-#define CONTROL_XT_EN           0x32ULL
-#define CONTROL_INTCAPXT_EN     0x33ULL
-
-#define CTRL_INV_TO_MASK       (7 << CONTROL_INV_TIMEOUT)
-#define CTRL_INV_TO_NONE       0
-#define CTRL_INV_TO_1MS                1
-#define CTRL_INV_TO_10MS       2
-#define CTRL_INV_TO_100MS      3
-#define CTRL_INV_TO_1S         4
-#define CTRL_INV_TO_10S                5
-#define CTRL_INV_TO_100S       6
-
-/* command specific defines */
-#define CMD_COMPL_WAIT          0x01
-#define CMD_INV_DEV_ENTRY       0x02
-#define CMD_INV_IOMMU_PAGES    0x03
-#define CMD_INV_IOTLB_PAGES    0x04
-#define CMD_INV_IRT            0x05
-#define CMD_COMPLETE_PPR       0x07
-#define CMD_INV_ALL            0x08
-
-#define CMD_COMPL_WAIT_STORE_MASK      0x01
-#define CMD_COMPL_WAIT_INT_MASK                0x02
-#define CMD_INV_IOMMU_PAGES_SIZE_MASK  0x01
-#define CMD_INV_IOMMU_PAGES_PDE_MASK   0x02
-#define CMD_INV_IOMMU_PAGES_GN_MASK    0x04
-
-#define PPR_STATUS_MASK                        0xf
-#define PPR_STATUS_SHIFT               12
-
-#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS        0x7fffffffffffffffULL
-
-/* macros and definitions for device table entries */
-#define DEV_ENTRY_VALID         0x00
-#define DEV_ENTRY_TRANSLATION   0x01
-#define DEV_ENTRY_PPR           0x34
-#define DEV_ENTRY_IR            0x3d
-#define DEV_ENTRY_IW            0x3e
-#define DEV_ENTRY_NO_PAGE_FAULT        0x62
-#define DEV_ENTRY_EX            0x67
-#define DEV_ENTRY_SYSMGT1       0x68
-#define DEV_ENTRY_SYSMGT2       0x69
-#define DEV_ENTRY_IRQ_TBL_EN   0x80
-#define DEV_ENTRY_INIT_PASS     0xb8
-#define DEV_ENTRY_EINT_PASS     0xb9
-#define DEV_ENTRY_NMI_PASS      0xba
-#define DEV_ENTRY_LINT0_PASS    0xbe
-#define DEV_ENTRY_LINT1_PASS    0xbf
-#define DEV_ENTRY_MODE_MASK    0x07
-#define DEV_ENTRY_MODE_SHIFT   0x09
-
-#define MAX_DEV_TABLE_ENTRIES  0xffff
-
-/* constants to configure the command buffer */
-#define CMD_BUFFER_SIZE    8192
-#define CMD_BUFFER_UNINITIALIZED 1
-#define CMD_BUFFER_ENTRIES 512
-#define MMIO_CMD_SIZE_SHIFT 56
-#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
-
-/* constants for event buffer handling */
-#define EVT_BUFFER_SIZE                8192 /* 512 entries */
-#define EVT_LEN_MASK           (0x9ULL << 56)
-
-/* Constants for PPR Log handling */
-#define PPR_LOG_ENTRIES                512
-#define PPR_LOG_SIZE_SHIFT     56
-#define PPR_LOG_SIZE_512       (0x9ULL << PPR_LOG_SIZE_SHIFT)
-#define PPR_ENTRY_SIZE         16
-#define PPR_LOG_SIZE           (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
-
-#define PPR_REQ_TYPE(x)                (((x) >> 60) & 0xfULL)
-#define PPR_FLAGS(x)           (((x) >> 48) & 0xfffULL)
-#define PPR_DEVID(x)           ((x) & 0xffffULL)
-#define PPR_TAG(x)             (((x) >> 32) & 0x3ffULL)
-#define PPR_PASID1(x)          (((x) >> 16) & 0xffffULL)
-#define PPR_PASID2(x)          (((x) >> 42) & 0xfULL)
-#define PPR_PASID(x)           ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
-
-#define PPR_REQ_FAULT          0x01
-
-/* Constants for GA Log handling */
-#define GA_LOG_ENTRIES         512
-#define GA_LOG_SIZE_SHIFT      56
-#define GA_LOG_SIZE_512                (0x8ULL << GA_LOG_SIZE_SHIFT)
-#define GA_ENTRY_SIZE          8
-#define GA_LOG_SIZE            (GA_ENTRY_SIZE * GA_LOG_ENTRIES)
-
-#define GA_TAG(x)              (u32)(x & 0xffffffffULL)
-#define GA_DEVID(x)            (u16)(((x) >> 32) & 0xffffULL)
-#define GA_REQ_TYPE(x)         (((x) >> 60) & 0xfULL)
-
-#define GA_GUEST_NR            0x1
-
-/* Bit value definitions for DTE irq remapping fields */
-#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
-#define DTE_IRQ_REMAP_INTCTL_MASK      (0x3ULL << 60)
-#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
-#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
-#define DTE_IRQ_REMAP_ENABLE    1ULL
-
-#define PAGE_MODE_NONE    0x00
-#define PAGE_MODE_1_LEVEL 0x01
-#define PAGE_MODE_2_LEVEL 0x02
-#define PAGE_MODE_3_LEVEL 0x03
-#define PAGE_MODE_4_LEVEL 0x04
-#define PAGE_MODE_5_LEVEL 0x05
-#define PAGE_MODE_6_LEVEL 0x06
-#define PAGE_MODE_7_LEVEL 0x07
-
-#define PM_LEVEL_SHIFT(x)      (12 + ((x) * 9))
-#define PM_LEVEL_SIZE(x)       (((x) < 6) ? \
-                                 ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
-                                  (0xffffffffffffffffULL))
-#define PM_LEVEL_INDEX(x, a)   (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
-#define PM_LEVEL_ENC(x)                (((x) << 9) & 0xe00ULL)
-#define PM_LEVEL_PDE(x, a)     ((a) | PM_LEVEL_ENC((x)) | \
-                                IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define PM_PTE_LEVEL(pte)      (((pte) >> 9) & 0x7ULL)
-
-#define PM_MAP_4k              0
-#define PM_ADDR_MASK           0x000ffffffffff000ULL
-#define PM_MAP_MASK(lvl)       (PM_ADDR_MASK & \
-                               (~((1ULL << (12 + ((lvl) * 9))) - 1)))
-#define PM_ALIGNED(lvl, addr)  ((PM_MAP_MASK(lvl) & (addr)) == (addr))
-
-/*
- * Returns the page table level to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_LEVEL(pagesize) \
-               ((__ffs(pagesize) - 12) / 9)
-/*
- * Returns the number of ptes to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_PTE_COUNT(pagesize) \
-               (1ULL << ((__ffs(pagesize) - 12) % 9))
-
-/*
- * Aligns a given io-virtual address to a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_ALIGN(address, pagesize) \
-               ((address) & ~((pagesize) - 1))
-/*
- * Creates an IOMMU PTE for an address and a given pagesize
- * The PTE has no permission bits set
- * Pagesize is expected to be a power-of-two larger than 4096
- */
-#define PAGE_SIZE_PTE(address, pagesize)               \
-               (((address) | ((pagesize) - 1)) &       \
-                (~(pagesize >> 1)) & PM_ADDR_MASK)
-
-/*
- * Takes a PTE value with mode=0x07 and returns the page size it maps
- */
-#define PTE_PAGE_SIZE(pte) \
-       (1ULL << (1 + ffz(((pte) | 0xfffULL))))
-
-/*
- * Takes a page-table level and returns the default page-size for this level
- */
-#define PTE_LEVEL_PAGE_SIZE(level)                     \
-       (1ULL << (12 + (9 * (level))))
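As a worked example of the size macros above: a 64 KiB mapping is expressed as 16 contiguous level-0 PTEs, while a 2 MiB mapping is a single level-1 PTE. The following stand-alone sketch re-states PAGE_SIZE_LEVEL, PAGE_SIZE_PTE_COUNT and PTE_LEVEL_PAGE_SIZE in user space, with __builtin_ctzll standing in for the kernel's __ffs, purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_LEVEL(ps)	((__builtin_ctzll(ps) - 12) / 9)
#define PAGE_SIZE_PTE_COUNT(ps)	(1ULL << ((__builtin_ctzll(ps) - 12) % 9))
#define PTE_LEVEL_PAGE_SIZE(l)	(1ULL << (12 + (9 * (l))))

int main(void)
{
	uint64_t sizes[] = { 0x1000, 0x10000, 0x200000, 0x40000000 };	/* 4K, 64K, 2M, 1G */

	for (unsigned i = 0; i < 4; i++)
		printf("%#llx: level %d, %llu PTE(s), level default size %#llx\n",
		       (unsigned long long)sizes[i],
		       (int)PAGE_SIZE_LEVEL(sizes[i]),
		       (unsigned long long)PAGE_SIZE_PTE_COUNT(sizes[i]),
		       (unsigned long long)PTE_LEVEL_PAGE_SIZE(PAGE_SIZE_LEVEL(sizes[i])));
	return 0;
}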
-
-/*
- * Bit value definition for I/O PTE fields
- */
-#define IOMMU_PTE_PR (1ULL << 0)
-#define IOMMU_PTE_U  (1ULL << 59)
-#define IOMMU_PTE_FC (1ULL << 60)
-#define IOMMU_PTE_IR (1ULL << 61)
-#define IOMMU_PTE_IW (1ULL << 62)
-
-/*
- * Bit value definition for DTE fields
- */
-#define DTE_FLAG_V  (1ULL << 0)
-#define DTE_FLAG_TV (1ULL << 1)
-#define DTE_FLAG_IR (1ULL << 61)
-#define DTE_FLAG_IW (1ULL << 62)
-
-#define DTE_FLAG_IOTLB (1ULL << 32)
-#define DTE_FLAG_GV    (1ULL << 55)
-#define DTE_FLAG_MASK  (0x3ffULL << 32)
-#define DTE_GLX_SHIFT  (56)
-#define DTE_GLX_MASK   (3)
-#define DEV_DOMID_MASK 0xffffULL
-
-#define DTE_GCR3_VAL_A(x)      (((x) >> 12) & 0x00007ULL)
-#define DTE_GCR3_VAL_B(x)      (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x)      (((x) >> 31) & 0x1fffffULL)
-
-#define DTE_GCR3_INDEX_A       0
-#define DTE_GCR3_INDEX_B       1
-#define DTE_GCR3_INDEX_C       1
-
-#define DTE_GCR3_SHIFT_A       58
-#define DTE_GCR3_SHIFT_B       16
-#define DTE_GCR3_SHIFT_C       43
-
-#define GCR3_VALID             0x01ULL
-
-#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
-#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
-#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
-#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
-
-#define IOMMU_PROT_MASK 0x03
-#define IOMMU_PROT_IR 0x01
-#define IOMMU_PROT_IW 0x02
-
-#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE        (1 << 2)
-
-/* IOMMU capabilities */
-#define IOMMU_CAP_IOTLB   24
-#define IOMMU_CAP_NPCACHE 26
-#define IOMMU_CAP_EFR     27
-
-/* IOMMU Feature Reporting Field (for IVHD type 10h) */
-#define IOMMU_FEAT_GASUP_SHIFT 6
-
-/* IOMMU Extended Feature Register (EFR) */
-#define IOMMU_EFR_XTSUP_SHIFT  2
-#define IOMMU_EFR_GASUP_SHIFT  7
-#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT  46
-
-#define MAX_DOMAIN_ID 65536
-
-/* Protection domain flags */
-#define PD_DMA_OPS_MASK                (1UL << 0) /* domain used for dma_ops */
-#define PD_DEFAULT_MASK                (1UL << 1) /* domain is a default dma_ops
-                                             domain for an IOMMU */
-#define PD_PASSTHROUGH_MASK    (1UL << 2) /* domain has no page
-                                             translation */
-#define PD_IOMMUV2_MASK                (1UL << 3) /* domain has gcr3 table */
-
-extern bool amd_iommu_dump;
-#define DUMP_printk(format, arg...)                            \
-       do {                                                    \
-               if (amd_iommu_dump)                             \
-                       pr_info("AMD-Vi: " format, ## arg);     \
-       } while(0);
-
-/* global flag if IOMMUs cache non-present entries */
-extern bool amd_iommu_np_cache;
-/* Only true if all IOMMUs support device IOTLBs */
-extern bool amd_iommu_iotlb_sup;
-
-#define MAX_IRQS_PER_TABLE     256
-#define IRQ_TABLE_ALIGNMENT    128
-
-struct irq_remap_table {
-       raw_spinlock_t lock;
-       unsigned min_index;
-       u32 *table;
-};
-
-extern struct irq_remap_table **irq_lookup_table;
-
-/* Interrupt remapping feature used? */
-extern bool amd_iommu_irq_remap;
-
-/* kmem_cache to get tables with 128-byte alignment */
-extern struct kmem_cache *amd_iommu_irq_cache;
-
-/*
- * Make iterating over all IOMMUs easier
- */
-#define for_each_iommu(iommu) \
-       list_for_each_entry((iommu), &amd_iommu_list, list)
-#define for_each_iommu_safe(iommu, next) \
-       list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
-
-#define APERTURE_RANGE_SHIFT   27      /* 128 MB */
-#define APERTURE_RANGE_SIZE    (1ULL << APERTURE_RANGE_SHIFT)
-#define APERTURE_RANGE_PAGES   (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
-#define APERTURE_MAX_RANGES    32      /* allows 4GB of DMA address space */
-#define APERTURE_RANGE_INDEX(a)        ((a) >> APERTURE_RANGE_SHIFT)
-#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
-/*
- * This struct is used to pass information about
- * incoming PPR faults around.
- */
-struct amd_iommu_fault {
-       u64 address;    /* IO virtual address of the fault */
-       u32 pasid;      /* Address space identifier */
-       u16 device_id;  /* Originating PCI device id */
-       u16 tag;        /* PPR tag */
-       u16 flags;      /* Fault flags */
-
-};
-
-
-struct iommu_domain;
-struct irq_domain;
-struct amd_irte_ops;
-
-#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED      (1 << 0)
-
-/*
- * This structure contains generic data for IOMMU protection domains
- * independent of their use.
- */
-struct protection_domain {
-       struct list_head dev_list; /* List of all devices in this domain */
-       struct iommu_domain domain; /* generic domain handle used by
-                                      iommu core code */
-       spinlock_t lock;        /* mostly used to lock the page table */
-       u16 id;                 /* the domain id written to the device table */
-       atomic64_t pt_root;     /* pgtable root and pgtable mode */
-       int glx;                /* Number of levels for GCR3 table */
-       u64 *gcr3_tbl;          /* Guest CR3 table */
-       unsigned long flags;    /* flags to find out type of domain */
-       unsigned dev_cnt;       /* devices assigned to this domain */
-       unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
-};
-
-/* For decoded pt_root */
-struct domain_pgtable {
-       int mode;
-       u64 *root;
-};
-
-/*
- * Structure where we save information about one hardware AMD IOMMU in the
- * system.
- */
-struct amd_iommu {
-       struct list_head list;
-
-       /* Index within the IOMMU array */
-       int index;
-
-       /* locks the accesses to the hardware */
-       raw_spinlock_t lock;
-
-       /* Pointer to PCI device of this IOMMU */
-       struct pci_dev *dev;
-
-       /* Cache pdev to root device for resume quirks */
-       struct pci_dev *root_pdev;
-
-       /* physical address of MMIO space */
-       u64 mmio_phys;
-
-       /* physical end address of MMIO space */
-       u64 mmio_phys_end;
-
-       /* virtual address of MMIO space */
-       u8 __iomem *mmio_base;
-
-       /* capabilities of that IOMMU read from ACPI */
-       u32 cap;
-
-       /* flags read from acpi table */
-       u8 acpi_flags;
-
-       /* Extended features */
-       u64 features;
-
-       /* IOMMUv2 */
-       bool is_iommu_v2;
-
-       /* PCI device id of the IOMMU device */
-       u16 devid;
-
-       /*
-        * Capability pointer. There could be more than one IOMMU per PCI
-        * device function if there is more than one AMD IOMMU capability
-        * pointer.
-        */
-       u16 cap_ptr;
-
-       /* pci domain of this IOMMU */
-       u16 pci_seg;
-
-       /* start of exclusion range of that IOMMU */
-       u64 exclusion_start;
-       /* length of exclusion range of that IOMMU */
-       u64 exclusion_length;
-
-       /* command buffer virtual address */
-       u8 *cmd_buf;
-       u32 cmd_buf_head;
-       u32 cmd_buf_tail;
-
-       /* event buffer virtual address */
-       u8 *evt_buf;
-
-       /* Base of the PPR log, if present */
-       u8 *ppr_log;
-
-       /* Base of the GA log, if present */
-       u8 *ga_log;
-
-       /* Tail of the GA log, if present */
-       u8 *ga_log_tail;
-
-       /* true if interrupts for this IOMMU are already enabled */
-       bool int_enabled;
-
-       /* if one, we need to send a completion wait command */
-       bool need_sync;
-
-       /* Handle for IOMMU core code */
-       struct iommu_device iommu;
-
-       /*
-        * We can't rely on the BIOS to restore all values on reinit, so we
-        * need to stash them
-        */
-
-       /* The iommu BAR */
-       u32 stored_addr_lo;
-       u32 stored_addr_hi;
-
-       /*
-        * Each iommu has 6 l1s, each of which is documented as having 0x12
-        * registers
-        */
-       u32 stored_l1[6][0x12];
-
-       /* The l2 indirect registers */
-       u32 stored_l2[0x83];
-
-       /* The maximum PC banks and counters/bank (PCSup=1) */
-       u8 max_banks;
-       u8 max_counters;
-#ifdef CONFIG_IRQ_REMAP
-       struct irq_domain *ir_domain;
-       struct irq_domain *msi_domain;
-
-       struct amd_irte_ops *irte_ops;
-#endif
-
-       u32 flags;
-       volatile u64 __aligned(8) cmd_sem;
-
-#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-       /* DebugFS Info */
-       struct dentry *debugfs;
-#endif
-       /* IRQ notifier for IntCapXT interrupt */
-       struct irq_affinity_notify intcapxt_notify;
-};
-
-static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
-{
-       struct iommu_device *iommu = dev_to_iommu_device(dev);
-
-       return container_of(iommu, struct amd_iommu, iommu);
-}
-
-#define ACPIHID_UID_LEN 256
-#define ACPIHID_HID_LEN 9
-
-struct acpihid_map_entry {
-       struct list_head list;
-       u8 uid[ACPIHID_UID_LEN];
-       u8 hid[ACPIHID_HID_LEN];
-       u16 devid;
-       u16 root_devid;
-       bool cmd_line;
-       struct iommu_group *group;
-};
-
-struct devid_map {
-       struct list_head list;
-       u8 id;
-       u16 devid;
-       bool cmd_line;
-};
-
-/*
- * This struct contains device specific data for the IOMMU
- */
-struct iommu_dev_data {
-       /* Protect against attach/detach races */
-       spinlock_t lock;
-
-       struct list_head list;            /* For domain->dev_list */
-       struct llist_node dev_data_list;  /* For global dev_data_list */
-       struct protection_domain *domain; /* Domain the device is bound to */
-       struct pci_dev *pdev;
-       u16 devid;                        /* PCI Device ID */
-       bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-       struct {
-               bool enabled;
-               int qdep;
-       } ats;                            /* ATS state */
-       bool pri_tlp;                     /* PASID TLB required for
-                                            PPR completions */
-       u32 errata;                       /* Bitmap for errata to apply */
-       bool use_vapic;                   /* Enable device to use vapic mode */
-       bool defer_attach;
-
-       struct ratelimit_state rs;        /* Ratelimit IOPF messages */
-};
-
-/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
-extern struct list_head ioapic_map;
-extern struct list_head hpet_map;
-extern struct list_head acpihid_map;
-
-/*
- * List with all IOMMUs in the system. This list is not locked because it is
- * only written and read at driver initialization or suspend time
- */
-extern struct list_head amd_iommu_list;
-
-/*
- * Array with pointers to each IOMMU struct
- * The indices are referenced in the protection domains
- */
-extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
-
-/*
- * Structure defining one entry in the device table
- */
-struct dev_table_entry {
-       u64 data[4];
-};
-
-/*
- * One entry for unity mappings parsed out of the ACPI table.
- */
-struct unity_map_entry {
-       struct list_head list;
-
-       /* starting device id this entry is used for (including) */
-       u16 devid_start;
-       /* end device id this entry is used for (including) */
-       u16 devid_end;
-
-       /* start address to unity map (including) */
-       u64 address_start;
-       /* end address to unity map (including) */
-       u64 address_end;
-
-       /* required protection */
-       int prot;
-};
-
-/*
- * List of all unity mappings. It is not locked because at runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
- * Data structures for device handling
- */
-
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to map requestor ids to device ids. Not locked because it is
- * only read at runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
-/* size of the dma_ops aperture as power of 2 */
-extern unsigned amd_iommu_aperture_order;
-
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
-/* allocation bitmap for domain ids */
-extern unsigned long *amd_iommu_pd_alloc_bitmap;
-
-/*
- * If true, the addresses will be flushed at unmap time, not when
- * they are reused
- */
-extern bool amd_iommu_unmap_flush;
-
-/* Smallest max PASID supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasid;
-
-extern bool amd_iommu_v2_present;
-
-extern bool amd_iommu_force_isolation;
-
-/* Max levels of glxval supported */
-extern int amd_iommu_max_glx_val;
-
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-extern void iommu_flush_all_caches(struct amd_iommu *iommu);
-
-static inline int get_ioapic_devid(int id)
-{
-       struct devid_map *entry;
-
-       list_for_each_entry(entry, &ioapic_map, list) {
-               if (entry->id == id)
-                       return entry->devid;
-       }
-
-       return -EINVAL;
-}
-
-static inline int get_hpet_devid(int id)
-{
-       struct devid_map *entry;
-
-       list_for_each_entry(entry, &hpet_map, list) {
-               if (entry->id == id)
-                       return entry->devid;
-       }
-
-       return -EINVAL;
-}
-
-enum amd_iommu_intr_mode_type {
-       AMD_IOMMU_GUEST_IR_LEGACY,
-
-       /* This mode is not visible to users. It is used when
-        * we cannot fully enable vAPIC and fall back to supporting only
-        * legacy interrupt remapping via 128-bit IRTEs.
-        */
-       AMD_IOMMU_GUEST_IR_LEGACY_GA,
-       AMD_IOMMU_GUEST_IR_VAPIC,
-};
-
-#define AMD_IOMMU_GUEST_IR_GA(x)       (x == AMD_IOMMU_GUEST_IR_VAPIC || \
-                                        x == AMD_IOMMU_GUEST_IR_LEGACY_GA)
-
-#define AMD_IOMMU_GUEST_IR_VAPIC(x)    (x == AMD_IOMMU_GUEST_IR_VAPIC)
-
-union irte {
-       u32 val;
-       struct {
-               u32 valid       : 1,
-                   no_fault    : 1,
-                   int_type    : 3,
-                   rq_eoi      : 1,
-                   dm          : 1,
-                   rsvd_1      : 1,
-                   destination : 8,
-                   vector      : 8,
-                   rsvd_2      : 8;
-       } fields;
-};
-
-#define APICID_TO_IRTE_DEST_LO(x)    (x & 0xffffff)
-#define APICID_TO_IRTE_DEST_HI(x)    ((x >> 24) & 0xff)
-
-union irte_ga_lo {
-       u64 val;
-
-       /* For int remapping */
-       struct {
-               u64 valid       : 1,
-                   no_fault    : 1,
-                   /* ------ */
-                   int_type    : 3,
-                   rq_eoi      : 1,
-                   dm          : 1,
-                   /* ------ */
-                   guest_mode  : 1,
-                   destination : 24,
-                   ga_tag      : 32;
-       } fields_remap;
-
-       /* For guest vAPIC */
-       struct {
-               u64 valid       : 1,
-                   no_fault    : 1,
-                   /* ------ */
-                   ga_log_intr : 1,
-                   rsvd1       : 3,
-                   is_run      : 1,
-                   /* ------ */
-                   guest_mode  : 1,
-                   destination : 24,
-                   ga_tag      : 32;
-       } fields_vapic;
-};
-
-union irte_ga_hi {
-       u64 val;
-       struct {
-               u64 vector      : 8,
-                   rsvd_1      : 4,
-                   ga_root_ptr : 40,
-                   rsvd_2      : 4,
-                   destination : 8;
-       } fields;
-};
-
-struct irte_ga {
-       union irte_ga_lo lo;
-       union irte_ga_hi hi;
-};
-
-struct irq_2_irte {
-       u16 devid; /* Device ID for IRTE table */
-       u16 index; /* Index into IRTE table */
-};
-
-struct amd_ir_data {
-       u32 cached_ga_tag;
-       struct irq_2_irte irq_2_irte;
-       struct msi_msg msi_entry;
-       void *entry;    /* Pointer to union irte or struct irte_ga */
-       void *ref;      /* Pointer to the actual irte */
-
-       /**
-        * Store information for activating/deactivating the
-        * guest virtual APIC mode at runtime.
-        */
-       struct irq_cfg *cfg;
-       int ga_vector;
-       int ga_root_ptr;
-       int ga_tag;
-};
-
-struct amd_irte_ops {
-       void (*prepare)(void *, u32, u32, u8, u32, int);
-       void (*activate)(void *, u16, u16);
-       void (*deactivate)(void *, u16, u16);
-       void (*set_affinity)(void *, u16, u16, u8, u32);
-       void *(*get)(struct irq_remap_table *, int);
-       void (*set_allocated)(struct irq_remap_table *, int);
-       bool (*is_allocated)(struct irq_remap_table *, int);
-       void (*clear_allocated)(struct irq_remap_table *, int);
-};
-
-#ifdef CONFIG_IRQ_REMAP
-extern struct amd_irte_ops irte_32_ops;
-extern struct amd_irte_ops irte_128_ops;
-#endif
-
-#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
deleted file mode 100644 (file)
index c8a7b6b..0000000
+++ /dev/null
@@ -1,981 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#define pr_fmt(fmt)     "AMD-Vi: " fmt
-
-#include <linux/mmu_notifier.h>
-#include <linux/amd-iommu.h>
-#include <linux/mm_types.h>
-#include <linux/profile.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-
-#include "amd_iommu.h"
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
-
-#define MAX_DEVICES            0x10000
-#define PRI_QUEUE_SIZE         512
-
-struct pri_queue {
-       atomic_t inflight;
-       bool finish;
-       int status;
-};
-
-struct pasid_state {
-       struct list_head list;                  /* For global state-list */
-       atomic_t count;                         /* Reference count */
-       unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
-                                                  calls */
-       struct mm_struct *mm;                   /* mm_struct for the faults */
-       struct mmu_notifier mn;                 /* mmu_notifier handle */
-       struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
-       struct device_state *device_state;      /* Link to our device_state */
-       int pasid;                              /* PASID index */
-       bool invalid;                           /* Used during setup and
-                                                  teardown of the pasid */
-       spinlock_t lock;                        /* Protect pri_queues and
-                                                  mmu_notifier_count */
-       wait_queue_head_t wq;                   /* To wait for count == 0 */
-};
-
-struct device_state {
-       struct list_head list;
-       u16 devid;
-       atomic_t count;
-       struct pci_dev *pdev;
-       struct pasid_state **states;
-       struct iommu_domain *domain;
-       int pasid_levels;
-       int max_pasids;
-       amd_iommu_invalid_ppr_cb inv_ppr_cb;
-       amd_iommu_invalidate_ctx inv_ctx_cb;
-       spinlock_t lock;
-       wait_queue_head_t wq;
-};
-
-struct fault {
-       struct work_struct work;
-       struct device_state *dev_state;
-       struct pasid_state *state;
-       struct mm_struct *mm;
-       u64 address;
-       u16 devid;
-       u16 pasid;
-       u16 tag;
-       u16 finish;
-       u16 flags;
-};
-
-static LIST_HEAD(state_list);
-static spinlock_t state_lock;
-
-static struct workqueue_struct *iommu_wq;
-
-static void free_pasid_states(struct device_state *dev_state);
-
-static u16 device_id(struct pci_dev *pdev)
-{
-       u16 devid;
-
-       devid = pdev->bus->number;
-       devid = (devid << 8) | pdev->devfn;
-
-       return devid;
-}
-
-static struct device_state *__get_device_state(u16 devid)
-{
-       struct device_state *dev_state;
-
-       list_for_each_entry(dev_state, &state_list, list) {
-               if (dev_state->devid == devid)
-                       return dev_state;
-       }
-
-       return NULL;
-}
-
-static struct device_state *get_device_state(u16 devid)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-
-       spin_lock_irqsave(&state_lock, flags);
-       dev_state = __get_device_state(devid);
-       if (dev_state != NULL)
-               atomic_inc(&dev_state->count);
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return dev_state;
-}
-
-static void free_device_state(struct device_state *dev_state)
-{
-       struct iommu_group *group;
-
-       /*
-        * First detach device from domain - No more PRI requests will arrive
-        * from that device after it is unbound from the IOMMUv2 domain.
-        */
-       group = iommu_group_get(&dev_state->pdev->dev);
-       if (WARN_ON(!group))
-               return;
-
-       iommu_detach_group(dev_state->domain, group);
-
-       iommu_group_put(group);
-
-       /* Everything is down now, free the IOMMUv2 domain */
-       iommu_domain_free(dev_state->domain);
-
-       /* Finally get rid of the device-state */
-       kfree(dev_state);
-}
-
-static void put_device_state(struct device_state *dev_state)
-{
-       if (atomic_dec_and_test(&dev_state->count))
-               wake_up(&dev_state->wq);
-}
-
-/* Must be called under dev_state->lock */
-static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
-                                                 int pasid, bool alloc)
-{
-       struct pasid_state **root, **ptr;
-       int level, index;
-
-       level = dev_state->pasid_levels;
-       root  = dev_state->states;
-
-       while (true) {
-
-               index = (pasid >> (9 * level)) & 0x1ff;
-               ptr   = &root[index];
-
-               if (level == 0)
-                       break;
-
-               if (*ptr == NULL) {
-                       if (!alloc)
-                               return NULL;
-
-                       *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
-                       if (*ptr == NULL)
-                               return NULL;
-               }
-
-               root   = (struct pasid_state **)*ptr;
-               level -= 1;
-       }
-
-       return ptr;
-}
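The helper above walks a small radix tree keyed by PASID, consuming 9 bits per level from dev_state->pasid_levels down to level 0 and allocating intermediate pages on demand when alloc is true. A minimal stand-alone illustration of just the index arithmetic (the PASID and depth below are made up for the example):

#include <stdio.h>

/* Print the per-level table indices the walk above would use for a given
 * PASID and tree depth; the real depth comes from dev_state->pasid_levels.
 */
static void print_pasid_indices(int pasid, int levels)
{
	for (int level = levels; level >= 0; level--)
		printf("level %d -> index %d\n", level, (pasid >> (9 * level)) & 0x1ff);
}

int main(void)
{
	print_pasid_indices(0x1234, 1);	/* two-level table: indices 9 and 52 */
	return 0;
}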
-
-static int set_pasid_state(struct device_state *dev_state,
-                          struct pasid_state *pasid_state,
-                          int pasid)
-{
-       struct pasid_state **ptr;
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&dev_state->lock, flags);
-       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
-
-       ret = -ENOMEM;
-       if (ptr == NULL)
-               goto out_unlock;
-
-       ret = -ENOMEM;
-       if (*ptr != NULL)
-               goto out_unlock;
-
-       *ptr = pasid_state;
-
-       ret = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&dev_state->lock, flags);
-
-       return ret;
-}
-
-static void clear_pasid_state(struct device_state *dev_state, int pasid)
-{
-       struct pasid_state **ptr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_state->lock, flags);
-       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
-
-       if (ptr == NULL)
-               goto out_unlock;
-
-       *ptr = NULL;
-
-out_unlock:
-       spin_unlock_irqrestore(&dev_state->lock, flags);
-}
-
-static struct pasid_state *get_pasid_state(struct device_state *dev_state,
-                                          int pasid)
-{
-       struct pasid_state **ptr, *ret = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_state->lock, flags);
-       ptr = __get_pasid_state_ptr(dev_state, pasid, false);
-
-       if (ptr == NULL)
-               goto out_unlock;
-
-       ret = *ptr;
-       if (ret)
-               atomic_inc(&ret->count);
-
-out_unlock:
-       spin_unlock_irqrestore(&dev_state->lock, flags);
-
-       return ret;
-}
-
-static void free_pasid_state(struct pasid_state *pasid_state)
-{
-       kfree(pasid_state);
-}
-
-static void put_pasid_state(struct pasid_state *pasid_state)
-{
-       if (atomic_dec_and_test(&pasid_state->count))
-               wake_up(&pasid_state->wq);
-}
-
-static void put_pasid_state_wait(struct pasid_state *pasid_state)
-{
-       atomic_dec(&pasid_state->count);
-       wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
-       free_pasid_state(pasid_state);
-}
-
-static void unbind_pasid(struct pasid_state *pasid_state)
-{
-       struct iommu_domain *domain;
-
-       domain = pasid_state->device_state->domain;
-
-       /*
-        * Mark pasid_state as invalid; no more faults will be added to the
-        * work queue after this is visible everywhere.
-        */
-       pasid_state->invalid = true;
-
-       /* Make sure this is visible */
-       smp_wmb();
-
-       /* After this the device/pasid can't access the mm anymore */
-       amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
-
-       /* Make sure no more pending faults are in the queue */
-       flush_workqueue(iommu_wq);
-}
-
-static void free_pasid_states_level1(struct pasid_state **tbl)
-{
-       int i;
-
-       for (i = 0; i < 512; ++i) {
-               if (tbl[i] == NULL)
-                       continue;
-
-               free_page((unsigned long)tbl[i]);
-       }
-}
-
-static void free_pasid_states_level2(struct pasid_state **tbl)
-{
-       struct pasid_state **ptr;
-       int i;
-
-       for (i = 0; i < 512; ++i) {
-               if (tbl[i] == NULL)
-                       continue;
-
-               ptr = (struct pasid_state **)tbl[i];
-               free_pasid_states_level1(ptr);
-       }
-}
-
-static void free_pasid_states(struct device_state *dev_state)
-{
-       struct pasid_state *pasid_state;
-       int i;
-
-       for (i = 0; i < dev_state->max_pasids; ++i) {
-               pasid_state = get_pasid_state(dev_state, i);
-               if (pasid_state == NULL)
-                       continue;
-
-               put_pasid_state(pasid_state);
-
-               /*
-                * This will call the mn_release function and
-                * unbind the PASID
-                */
-               mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
-               put_pasid_state_wait(pasid_state); /* Reference taken in
-                                                     amd_iommu_bind_pasid */
-
-               /* Drop reference taken in amd_iommu_bind_pasid */
-               put_device_state(dev_state);
-       }
-
-       if (dev_state->pasid_levels == 2)
-               free_pasid_states_level2(dev_state->states);
-       else if (dev_state->pasid_levels == 1)
-               free_pasid_states_level1(dev_state->states);
-       else
-               BUG_ON(dev_state->pasid_levels != 0);
-
-       free_page((unsigned long)dev_state->states);
-}
-
-static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
-{
-       return container_of(mn, struct pasid_state, mn);
-}
-
-static void mn_invalidate_range(struct mmu_notifier *mn,
-                               struct mm_struct *mm,
-                               unsigned long start, unsigned long end)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-
-       pasid_state = mn_to_state(mn);
-       dev_state   = pasid_state->device_state;
-
-       if ((start ^ (end - 1)) < PAGE_SIZE)
-               amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
-                                    start);
-       else
-               amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
-}
-
-static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       bool run_inv_ctx_cb;
-
-       might_sleep();
-
-       pasid_state    = mn_to_state(mn);
-       dev_state      = pasid_state->device_state;
-       run_inv_ctx_cb = !pasid_state->invalid;
-
-       if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
-               dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
-
-       unbind_pasid(pasid_state);
-}
-
-static const struct mmu_notifier_ops iommu_mn = {
-       .release                = mn_release,
-       .invalidate_range       = mn_invalidate_range,
-};
-
-static void set_pri_tag_status(struct pasid_state *pasid_state,
-                              u16 tag, int status)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       pasid_state->pri[tag].status = status;
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void finish_pri_tag(struct device_state *dev_state,
-                          struct pasid_state *pasid_state,
-                          u16 tag)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
-           pasid_state->pri[tag].finish) {
-               amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
-                                      pasid_state->pri[tag].status, tag);
-               pasid_state->pri[tag].finish = false;
-               pasid_state->pri[tag].status = PPR_SUCCESS;
-       }
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void handle_fault_error(struct fault *fault)
-{
-       int status;
-
-       if (!fault->dev_state->inv_ppr_cb) {
-               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
-               return;
-       }
-
-       status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
-                                             fault->pasid,
-                                             fault->address,
-                                             fault->flags);
-       switch (status) {
-       case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
-               set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
-               break;
-       case AMD_IOMMU_INV_PRI_RSP_INVALID:
-               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
-               break;
-       case AMD_IOMMU_INV_PRI_RSP_FAIL:
-               set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static bool access_error(struct vm_area_struct *vma, struct fault *fault)
-{
-       unsigned long requested = 0;
-
-       if (fault->flags & PPR_FAULT_EXEC)
-               requested |= VM_EXEC;
-
-       if (fault->flags & PPR_FAULT_READ)
-               requested |= VM_READ;
-
-       if (fault->flags & PPR_FAULT_WRITE)
-               requested |= VM_WRITE;
-
-       return (requested & ~vma->vm_flags) != 0;
-}
-
-static void do_fault(struct work_struct *work)
-{
-       struct fault *fault = container_of(work, struct fault, work);
-       struct vm_area_struct *vma;
-       vm_fault_t ret = VM_FAULT_ERROR;
-       unsigned int flags = 0;
-       struct mm_struct *mm;
-       u64 address;
-
-       mm = fault->state->mm;
-       address = fault->address;
-
-       if (fault->flags & PPR_FAULT_USER)
-               flags |= FAULT_FLAG_USER;
-       if (fault->flags & PPR_FAULT_WRITE)
-               flags |= FAULT_FLAG_WRITE;
-       flags |= FAULT_FLAG_REMOTE;
-
-       down_read(&mm->mmap_sem);
-       vma = find_extend_vma(mm, address);
-       if (!vma || address < vma->vm_start)
-               /* failed to get a vma in the right range */
-               goto out;
-
-       /* Check if we have the right permissions on the vma */
-       if (access_error(vma, fault))
-               goto out;
-
-       ret = handle_mm_fault(vma, address, flags);
-out:
-       up_read(&mm->mmap_sem);
-
-       if (ret & VM_FAULT_ERROR)
-               /* failed to service fault */
-               handle_fault_error(fault);
-
-       finish_pri_tag(fault->dev_state, fault->state, fault->tag);
-
-       put_pasid_state(fault->state);
-
-       kfree(fault);
-}
-
-static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
-{
-       struct amd_iommu_fault *iommu_fault;
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       struct pci_dev *pdev = NULL;
-       unsigned long flags;
-       struct fault *fault;
-       bool finish;
-       u16 tag, devid;
-       int ret;
-
-       iommu_fault = data;
-       tag         = iommu_fault->tag & 0x1ff;
-       finish      = (iommu_fault->tag >> 9) & 1;
-
-       devid = iommu_fault->device_id;
-       pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
-                                          devid & 0xff);
-       if (!pdev)
-               return -ENODEV;
-
-       ret = NOTIFY_DONE;
-
-       /* In kdump kernel pci dev is not initialized yet -> send INVALID */
-       if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
-               amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
-                                      PPR_INVALID, tag);
-               goto out;
-       }
-
-       dev_state = get_device_state(iommu_fault->device_id);
-       if (dev_state == NULL)
-               goto out;
-
-       pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
-       if (pasid_state == NULL || pasid_state->invalid) {
-               /* We know the device but not the PASID -> send INVALID */
-               amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
-                                      PPR_INVALID, tag);
-               goto out_drop_state;
-       }
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       atomic_inc(&pasid_state->pri[tag].inflight);
-       if (finish)
-               pasid_state->pri[tag].finish = true;
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-
-       fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
-       if (fault == NULL) {
-               /* We are OOM - send success and let the device re-fault */
-               finish_pri_tag(dev_state, pasid_state, tag);
-               goto out_drop_state;
-       }
-
-       fault->dev_state = dev_state;
-       fault->address   = iommu_fault->address;
-       fault->state     = pasid_state;
-       fault->tag       = tag;
-       fault->finish    = finish;
-       fault->pasid     = iommu_fault->pasid;
-       fault->flags     = iommu_fault->flags;
-       INIT_WORK(&fault->work, do_fault);
-
-       queue_work(iommu_wq, &fault->work);
-
-       ret = NOTIFY_OK;
-
-out_drop_state:
-
-       if (ret != NOTIFY_OK && pasid_state)
-               put_pasid_state(pasid_state);
-
-       put_device_state(dev_state);
-
-out:
-       return ret;
-}
-
-static struct notifier_block ppr_nb = {
-       .notifier_call = ppr_notifier,
-};
-
-int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
-                        struct task_struct *task)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       struct mm_struct *mm;
-       u16 devid;
-       int ret;
-
-       might_sleep();
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       devid     = device_id(pdev);
-       dev_state = get_device_state(devid);
-
-       if (dev_state == NULL)
-               return -EINVAL;
-
-       ret = -EINVAL;
-       if (pasid < 0 || pasid >= dev_state->max_pasids)
-               goto out;
-
-       ret = -ENOMEM;
-       pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
-       if (pasid_state == NULL)
-               goto out;
-
-       atomic_set(&pasid_state->count, 1);
-       init_waitqueue_head(&pasid_state->wq);
-       spin_lock_init(&pasid_state->lock);
-
-       mm                        = get_task_mm(task);
-       pasid_state->mm           = mm;
-       pasid_state->device_state = dev_state;
-       pasid_state->pasid        = pasid;
-       pasid_state->invalid      = true; /* Mark as valid only if we are
-                                            done with setting up the pasid */
-       pasid_state->mn.ops       = &iommu_mn;
-
-       if (pasid_state->mm == NULL)
-               goto out_free;
-
-       mmu_notifier_register(&pasid_state->mn, mm);
-
-       ret = set_pasid_state(dev_state, pasid_state, pasid);
-       if (ret)
-               goto out_unregister;
-
-       ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
-                                       __pa(pasid_state->mm->pgd));
-       if (ret)
-               goto out_clear_state;
-
-       /* Now we are ready to handle faults */
-       pasid_state->invalid = false;
-
-       /*
-        * Drop the reference to the mm_struct here. We rely on the
-        * mmu_notifier release call-back to inform us when the mm
-        * is going away.
-        */
-       mmput(mm);
-
-       return 0;
-
-out_clear_state:
-       clear_pasid_state(dev_state, pasid);
-
-out_unregister:
-       mmu_notifier_unregister(&pasid_state->mn, mm);
-       mmput(mm);
-
-out_free:
-       free_pasid_state(pasid_state);
-
-out:
-       put_device_state(dev_state);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_bind_pasid);
-
-void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       u16 devid;
-
-       might_sleep();
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       devid = device_id(pdev);
-       dev_state = get_device_state(devid);
-       if (dev_state == NULL)
-               return;
-
-       if (pasid < 0 || pasid >= dev_state->max_pasids)
-               goto out;
-
-       pasid_state = get_pasid_state(dev_state, pasid);
-       if (pasid_state == NULL)
-               goto out;
-       /*
-        * Drop reference taken here. We are safe because we still hold
-        * the reference taken in the amd_iommu_bind_pasid function.
-        */
-       put_pasid_state(pasid_state);
-
-       /* Clear the pasid state so that the pasid can be re-used */
-       clear_pasid_state(dev_state, pasid_state->pasid);
-
-       /*
-        * Call mmu_notifier_unregister to drop our reference
-        * to pasid_state->mm
-        */
-       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
-       put_pasid_state_wait(pasid_state); /* Reference taken in
-                                             amd_iommu_bind_pasid */
-out:
-       /* Drop reference taken in this function */
-       put_device_state(dev_state);
-
-       /* Drop reference taken in amd_iommu_bind_pasid */
-       put_device_state(dev_state);
-}
-EXPORT_SYMBOL(amd_iommu_unbind_pasid);
-
-int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
-{
-       struct device_state *dev_state;
-       struct iommu_group *group;
-       unsigned long flags;
-       int ret, tmp;
-       u16 devid;
-
-       might_sleep();
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       if (pasids <= 0 || pasids > (PASID_MASK + 1))
-               return -EINVAL;
-
-       devid = device_id(pdev);
-
-       dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
-       if (dev_state == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&dev_state->lock);
-       init_waitqueue_head(&dev_state->wq);
-       dev_state->pdev  = pdev;
-       dev_state->devid = devid;
-
-       tmp = pasids;
-       for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
-               dev_state->pasid_levels += 1;
-
-       atomic_set(&dev_state->count, 1);
-       dev_state->max_pasids = pasids;
-
-       ret = -ENOMEM;
-       dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
-       if (dev_state->states == NULL)
-               goto out_free_dev_state;
-
-       dev_state->domain = iommu_domain_alloc(&pci_bus_type);
-       if (dev_state->domain == NULL)
-               goto out_free_states;
-
-       amd_iommu_domain_direct_map(dev_state->domain);
-
-       ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
-       if (ret)
-               goto out_free_domain;
-
-       group = iommu_group_get(&pdev->dev);
-       if (!group) {
-               ret = -EINVAL;
-               goto out_free_domain;
-       }
-
-       ret = iommu_attach_group(dev_state->domain, group);
-       if (ret != 0)
-               goto out_drop_group;
-
-       iommu_group_put(group);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       if (__get_device_state(devid) != NULL) {
-               spin_unlock_irqrestore(&state_lock, flags);
-               ret = -EBUSY;
-               goto out_free_domain;
-       }
-
-       list_add_tail(&dev_state->list, &state_list);
-
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return 0;
-
-out_drop_group:
-       iommu_group_put(group);
-
-out_free_domain:
-       iommu_domain_free(dev_state->domain);
-
-out_free_states:
-       free_page((unsigned long)dev_state->states);
-
-out_free_dev_state:
-       kfree(dev_state);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_init_device);
-
-void amd_iommu_free_device(struct pci_dev *pdev)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-       u16 devid;
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       devid = device_id(pdev);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       dev_state = __get_device_state(devid);
-       if (dev_state == NULL) {
-               spin_unlock_irqrestore(&state_lock, flags);
-               return;
-       }
-
-       list_del(&dev_state->list);
-
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       /* Get rid of any remaining pasid states */
-       free_pasid_states(dev_state);
-
-       put_device_state(dev_state);
-       /*
-        * Wait until the last reference is dropped before freeing
-        * the device state.
-        */
-       wait_event(dev_state->wq, !atomic_read(&dev_state->count));
-       free_device_state(dev_state);
-}
-EXPORT_SYMBOL(amd_iommu_free_device);
-
-int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
-                                amd_iommu_invalid_ppr_cb cb)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-       u16 devid;
-       int ret;
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       devid = device_id(pdev);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       ret = -EINVAL;
-       dev_state = __get_device_state(devid);
-       if (dev_state == NULL)
-               goto out_unlock;
-
-       dev_state->inv_ppr_cb = cb;
-
-       ret = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
-
-int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
-                                   amd_iommu_invalidate_ctx cb)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-       u16 devid;
-       int ret;
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       devid = device_id(pdev);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       ret = -EINVAL;
-       dev_state = __get_device_state(devid);
-       if (dev_state == NULL)
-               goto out_unlock;
-
-       dev_state->inv_ctx_cb = cb;
-
-       ret = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
-
-static int __init amd_iommu_v2_init(void)
-{
-       int ret;
-
-       pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
-
-       if (!amd_iommu_v2_supported()) {
-               pr_info("AMD IOMMUv2 functionality not available on this system\n");
-               /*
-                * Load anyway to provide the symbols to other modules
-                * which may use AMD IOMMUv2 optionally.
-                */
-               return 0;
-       }
-
-       spin_lock_init(&state_lock);
-
-       ret = -ENOMEM;
-       iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
-       if (iommu_wq == NULL)
-               goto out;
-
-       amd_iommu_register_ppr_notifier(&ppr_nb);
-
-       return 0;
-
-out:
-       return ret;
-}
-
-static void __exit amd_iommu_v2_exit(void)
-{
-       struct device_state *dev_state;
-       int i;
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       amd_iommu_unregister_ppr_notifier(&ppr_nb);
-
-       flush_workqueue(iommu_wq);
-
-       /*
-        * The loop below might call flush_workqueue(), so call
-        * destroy_workqueue() after it
-        */
-       for (i = 0; i < MAX_DEVICES; ++i) {
-               dev_state = get_device_state(i);
-
-               if (dev_state == NULL)
-                       continue;
-
-               WARN_ON_ONCE(1);
-
-               put_device_state(dev_state);
-               amd_iommu_free_device(dev_state->pdev);
-       }
-
-       destroy_workqueue(iommu_wq);
-}
-
-module_init(amd_iommu_v2_init);
-module_exit(amd_iommu_v2_exit);