iommu/io-pgtable: Remove tlb_flush_leaf
author Robin Murphy <robin.murphy@arm.com>
Wed, 25 Nov 2020 17:29:39 +0000 (17:29 +0000)
committer Will Deacon <will@kernel.org>
Tue, 8 Dec 2020 15:23:37 +0000 (15:23 +0000)
The only user of tlb_flush_leaf is a particularly hairy corner of the
Arm short-descriptor code, which wants a synchronous invalidation to
minimise the races inherent in trying to split a large page mapping.
This is already far enough into "here be dragons" territory that no
sensible caller should ever hit it, and thus it really doesn't need
optimising. Although using tlb_flush_walk there may technically be
more heavyweight than needed, it does the job and saves everyone else
having to carry around useless baggage.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/9844ab0c5cb3da8b2f89c6c2da16941910702b41.1606324115.git.robin.murphy@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/arm/arm-smmu/qcom_iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
include/linux/io-pgtable.h

index 22ac7c692a81d30f9a6d7da60a75842efc69c4a3..50d881794758c35d2ab40c023d72066f3f9f9319 100644 (file)
@@ -139,7 +139,6 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops null_tlb_ops = {
        .tlb_flush_all = msm_iommu_tlb_flush_all,
        .tlb_flush_walk = msm_iommu_tlb_flush_walk,
-       .tlb_flush_leaf = msm_iommu_tlb_flush_walk,
        .tlb_add_page = msm_iommu_tlb_add_page,
 };
 
index 776448c527ea9c8cb650e858201315237a6389b9..c186914cc4f99fb506641cd885afe5feeb3601a3 100644 (file)
@@ -347,16 +347,9 @@ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
        mmu_tlb_sync_context(cookie);
 }
 
-static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
-                              void *cookie)
-{
-       mmu_tlb_sync_context(cookie);
-}
-
 static const struct iommu_flush_ops mmu_tlb_ops = {
        .tlb_flush_all  = mmu_tlb_inv_context_s1,
        .tlb_flush_walk = mmu_tlb_flush_walk,
-       .tlb_flush_leaf = mmu_tlb_flush_leaf,
 };
 
 int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
index 2ddf5ecc75f873bd88aff2d73ab25c20455c37dc..8ca7415d785d9bf5015544fea56cd52f17b90850 100644 (file)
@@ -1760,16 +1760,9 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
        arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-                                 size_t granule, void *cookie)
-{
-       arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
-}
-
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_flush_walk = arm_smmu_tlb_inv_walk,
-       .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
        .tlb_add_page   = arm_smmu_tlb_inv_page_nosync,
 };
 
index d8979bb71fc001f8bbe13c8072055ab92231381a..d8c6bfde6a61587864092c3df89789c0c7671f34 100644 (file)
@@ -333,14 +333,6 @@ static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
        arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
-                                    size_t granule, void *cookie)
-{
-       arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
-                                 ARM_SMMU_CB_S1_TLBIVAL);
-       arm_smmu_tlb_sync_context(cookie);
-}
-
 static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
                                     unsigned long iova, size_t granule,
                                     void *cookie)
@@ -357,14 +349,6 @@ static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
        arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
-                                    size_t granule, void *cookie)
-{
-       arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
-                                 ARM_SMMU_CB_S2_TLBIIPAS2L);
-       arm_smmu_tlb_sync_context(cookie);
-}
-
 static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
                                     unsigned long iova, size_t granule,
                                     void *cookie)
@@ -373,8 +357,8 @@ static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
                                  ARM_SMMU_CB_S2_TLBIIPAS2L);
 }
 
-static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
-                                      size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
+                                       size_t granule, void *cookie)
 {
        arm_smmu_tlb_inv_context_s2(cookie);
 }
@@ -401,21 +385,18 @@ static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
        .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
-       .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
        .tlb_add_page   = arm_smmu_tlb_add_page_s1,
 };
 
 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
-       .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
        .tlb_add_page   = arm_smmu_tlb_add_page_s2,
 };
 
 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-       .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
-       .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+       .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
        .tlb_add_page   = arm_smmu_tlb_add_page_s2_v1,
 };
 
index b30d6c966e2c816150f021f4d69e8c693067228d..7f280c8d5c53d91eada408e45a4e969ed5ea755e 100644 (file)
@@ -185,13 +185,6 @@ static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
        qcom_iommu_tlb_sync(cookie);
 }
 
-static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
-                                     size_t granule, void *cookie)
-{
-       qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
-       qcom_iommu_tlb_sync(cookie);
-}
-
 static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
                                    unsigned long iova, size_t granule,
                                    void *cookie)
@@ -202,7 +195,6 @@ static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops qcom_flush_ops = {
        .tlb_flush_all  = qcom_iommu_tlb_inv_context,
        .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
-       .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
        .tlb_add_page   = qcom_iommu_tlb_add_page,
 };
 
index 359b96b0fa3ea2a3ebd48ae8ba7e73ad55967101..1d92ac948db74c158188b156201846b60055926d 100644 (file)
@@ -584,7 +584,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
        __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
 
        size *= ARM_V7S_CONT_PAGES;
-       io_pgtable_tlb_flush_leaf(iop, iova, size, size);
+       io_pgtable_tlb_flush_walk(iop, iova, size, size);
        return pte;
 }
 
@@ -866,7 +866,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
        .tlb_flush_all  = dummy_tlb_flush_all,
        .tlb_flush_walk = dummy_tlb_flush,
-       .tlb_flush_leaf = dummy_tlb_flush,
        .tlb_add_page   = dummy_tlb_add_page,
 };
 
index 135f57b37bbd89a4ef8ab2051186341a04011480..49ad8d2a82df9508cfa1dbbd3ca108ada111793e 100644 (file)
@@ -1085,7 +1085,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
        .tlb_flush_all  = dummy_tlb_flush_all,
        .tlb_flush_walk = dummy_tlb_flush,
-       .tlb_flush_leaf = dummy_tlb_flush,
        .tlb_add_page   = dummy_tlb_add_page,
 };
 
index 0f18abda0e208a5e048b55ac3fbe18a5de717ef6..d71f10257f15929f6337b9cc13cc147998c59df5 100644 (file)
@@ -325,7 +325,6 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size,
 static const struct iommu_flush_ops ipmmu_flush_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
        .tlb_flush_walk = ipmmu_tlb_flush,
-       .tlb_flush_leaf = ipmmu_tlb_flush,
 };
 
 /* -----------------------------------------------------------------------------
index 3615cd6241c4d114bff8096182c079b4b0fd9d3e..040e85f70861d473d6880a980703fe805dc360fe 100644 (file)
@@ -174,12 +174,6 @@ static void __flush_iotlb_walk(unsigned long iova, size_t size,
        __flush_iotlb_range(iova, size, granule, false, cookie);
 }
 
-static void __flush_iotlb_leaf(unsigned long iova, size_t size,
-                              size_t granule, void *cookie)
-{
-       __flush_iotlb_range(iova, size, granule, true, cookie);
-}
-
 static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t granule, void *cookie)
 {
@@ -189,7 +183,6 @@ static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_flush_walk = __flush_iotlb_walk,
-       .tlb_flush_leaf = __flush_iotlb_leaf,
        .tlb_add_page = __flush_iotlb_page,
 };
 
index c072cee532c206cce7bd4a56ce68dff3254fe8a8..8e56cec532e71dfd47d1ba0f37678da783b6cb70 100644 (file)
@@ -240,7 +240,6 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops mtk_iommu_flush_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
-       .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
        .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
 };
 
index fb4d5a763e0c8de8deab264d11b3739be253454c..ea727eb1a1a933a50dae13e44be6d75074c4e9c7 100644 (file)
@@ -25,8 +25,6 @@ enum io_pgtable_fmt {
  * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
  *                  (sometimes referred to as the "walk cache") for a virtual
  *                  address range.
- * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
- *                  address range.
  * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
  *                  single page.  IOMMUs that cannot batch TLB invalidation
  *                  operations efficiently will typically issue them here, but
@@ -40,8 +38,6 @@ struct iommu_flush_ops {
        void (*tlb_flush_all)(void *cookie);
        void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
                               void *cookie);
-       void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
-                              void *cookie);
        void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
                             unsigned long iova, size_t granule, void *cookie);
 };
@@ -228,13 +224,6 @@ io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
        iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
 }
 
-static inline void
-io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
-                         size_t size, size_t granule)
-{
-       iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
-}
-
 static inline void
 io_pgtable_tlb_add_page(struct io_pgtable *iop,
                        struct iommu_iotlb_gather * gather, unsigned long iova,