iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops
author: Will Deacon <will@kernel.org>
Tue, 2 Jul 2019 15:43:34 +0000 (16:43 +0100)
committer: Will Deacon <will@kernel.org>
Wed, 24 Jul 2019 12:32:33 +0000 (13:32 +0100)
In preparation for TLB flush gathering in the IOMMU API, rename the
iommu_gather_ops structure in io-pgtable to iommu_flush_ops, which
better describes its purpose and avoids the potential for confusion
between different levels of the API.

$ find linux/ -type f -name '*.[ch]' | xargs sed -i 's/gather_ops/flush_ops/g'

Signed-off-by: Will Deacon <will@kernel.org>
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/qcom_iommu.c
include/linux/io-pgtable.h

index 92ac995dd9c66be77484f5f634d463098607f91a..17bceb11e708451bd9f04d534ee02de62e10759f 100644 (file)
@@ -257,7 +257,7 @@ static void mmu_tlb_sync_context(void *cookie)
        // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
 }
 
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static const struct iommu_flush_ops mmu_tlb_ops = {
        .tlb_flush_all  = mmu_tlb_inv_context_s1,
        .tlb_add_flush  = mmu_tlb_inv_range_nosync,
        .tlb_sync       = mmu_tlb_sync_context,
index a9a9fabd396804a26b77039d0fe804cea3dc56dc..7e137e1e28f18ea776f0eb983764c92da2523837 100644 (file)
@@ -1603,7 +1603,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        } while (size -= granule);
 }
 
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_flush_ops arm_smmu_flush_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
@@ -1796,7 +1796,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
                .ias            = ias,
                .oas            = oas,
                .coherent_walk  = smmu->features & ARM_SMMU_FEAT_COHERENCY,
-               .tlb            = &arm_smmu_gather_ops,
+               .tlb            = &arm_smmu_flush_ops,
                .iommu_dev      = smmu->dev,
        };
 
index 64977c131ee62aeeaea4865989ba8b378c924aeb..dc08db347ef3add4676877c4b8ee32ac6c74c9c8 100644 (file)
@@ -251,7 +251,7 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
        struct arm_smmu_device          *smmu;
        struct io_pgtable_ops           *pgtbl_ops;
-       const struct iommu_gather_ops   *tlb_ops;
+       const struct iommu_flush_ops    *tlb_ops;
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
        bool                            non_strict;
@@ -547,19 +547,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 }
 
-static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_vmid_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_vmid,
index a62733c6a632f9cbd06934cc749f9dd10d703a71..116f97ee991ec71120853c66a41486fec1070d4a 100644 (file)
@@ -817,7 +817,7 @@ static void dummy_tlb_sync(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops = {
        .tlb_flush_all  = dummy_tlb_flush_all,
        .tlb_add_flush  = dummy_tlb_add_flush,
        .tlb_sync       = dummy_tlb_sync,
index 0d6633921c1ece00d6e17cde9ec4138243c2b207..402f913b6f6dc96b34bca2fc7dd951795b4a8ca1 100644 (file)
@@ -1081,7 +1081,7 @@ static void dummy_tlb_sync(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
        .tlb_flush_all  = dummy_tlb_flush_all,
        .tlb_add_flush  = dummy_tlb_add_flush,
        .tlb_sync       = dummy_tlb_sync,
index ad0098c0c87c7544f18b70c0bd9dc43913ca0478..2c14a2c65b22257aa27867eb21460afa28c3731c 100644 (file)
@@ -367,7 +367,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
        /* The hardware doesn't support selective TLB flush. */
 }
 
-static const struct iommu_gather_ops ipmmu_gather_ops = {
+static const struct iommu_flush_ops ipmmu_flush_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
        .tlb_add_flush = ipmmu_tlb_add_flush,
        .tlb_sync = ipmmu_tlb_flush_all,
@@ -480,7 +480,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
-       domain->cfg.tlb = &ipmmu_gather_ops;
+       domain->cfg.tlb = &ipmmu_flush_ops;
        domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
        domain->io_domain.geometry.force_aperture = true;
        /*
index b25e2eb9e038d9460a94b487d66c718693f3c8cc..8b602384a385b5f0613352a37b24cb562a2b9d39 100644 (file)
@@ -178,7 +178,7 @@ static void __flush_iotlb_sync(void *cookie)
         */
 }
 
-static const struct iommu_gather_ops msm_iommu_gather_ops = {
+static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
@@ -345,7 +345,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
-               .tlb = &msm_iommu_gather_ops,
+               .tlb = &msm_iommu_flush_ops,
                .iommu_dev = priv->dev,
        };
 
index 82e4be4dfdaf84d17cc662fb7db2708cbeaf197c..fed77658d67e076c4c873684b22e986ebe11cad4 100644 (file)
@@ -188,7 +188,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
        }
 }
 
-static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+static const struct iommu_flush_ops mtk_iommu_flush_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
        .tlb_sync = mtk_iommu_tlb_sync,
@@ -267,7 +267,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
-               .tlb = &mtk_iommu_gather_ops,
+               .tlb = &mtk_iommu_flush_ops,
                .iommu_dev = data->dev,
        };
 
index 34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116..fd9d9f4da735f0f2f219cf96f034a7de4c00876e 100644 (file)
@@ -164,7 +164,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        }
 }
 
-static const struct iommu_gather_ops qcom_gather_ops = {
+static const struct iommu_flush_ops qcom_flush_ops = {
        .tlb_flush_all  = qcom_iommu_tlb_inv_context,
        .tlb_add_flush  = qcom_iommu_tlb_inv_range_nosync,
        .tlb_sync       = qcom_iommu_tlb_sync,
@@ -215,7 +215,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
                .pgsize_bitmap  = qcom_iommu_ops.pgsize_bitmap,
                .ias            = 32,
                .oas            = 40,
-               .tlb            = &qcom_gather_ops,
+               .tlb            = &qcom_flush_ops,
                .iommu_dev      = qcom_iommu->dev,
        };
 
index b5a450a3bb47a6f25beafa9510902af070f14dbd..6292ea15d6749d1c9f0a9a497510ee57b482f5b8 100644 (file)
@@ -17,7 +17,7 @@ enum io_pgtable_fmt {
 };
 
 /**
- * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
@@ -28,7 +28,7 @@ enum io_pgtable_fmt {
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
-struct iommu_gather_ops {
+struct iommu_flush_ops {
        void (*tlb_flush_all)(void *cookie);
        void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
                              bool leaf, void *cookie);
@@ -84,7 +84,7 @@ struct io_pgtable_cfg {
        unsigned int                    ias;
        unsigned int                    oas;
        bool                            coherent_walk;
-       const struct iommu_gather_ops   *tlb;
+       const struct iommu_flush_ops    *tlb;
        struct device                   *iommu_dev;
 
        /* Low-level data specific to the table format */