iommu/io-pgtable: Pass struct iommu_iotlb_gather to ->tlb_add_page()
Author:    Will Deacon <will@kernel.org>
           Tue, 2 Jul 2019 15:45:15 +0000 (16:45 +0100)
Committer: Will Deacon <will@kernel.org>
           Mon, 29 Jul 2019 16:22:59 +0000 (17:22 +0100)
With all the pieces in place, we can finally propagate the
iommu_iotlb_gather structure from the call to unmap() down to the IOMMU
drivers' implementation of ->tlb_add_page(). Currently everybody ignores
it, but the machinery is now there to defer invalidation.

Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/qcom_iommu.c
include/linux/io-pgtable.h

index 8e2e53079f489850d3ff38f8f626f6781b98c9dc..d1ebc7103065892fff6c119a94ffd6adf4bd22ba 100644 (file)
@@ -1596,7 +1596,8 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        } while (size -= granule);
 }
 
-static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule,
+static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
+                                        unsigned long iova, size_t granule,
                                         void *cookie)
 {
        arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
index f6689956ab6ebf4ddc839a78d6594a4084992740..5598d0ff71a82d77772283902424beafc0ba9200 100644 (file)
@@ -574,7 +574,8 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
        ops->tlb_sync(cookie);
 }
 
-static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
+static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
+                                 unsigned long iova, size_t granule,
                                  void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
index a7776e982b6c3e8bdd9d4397448d7b0f75a3e3e8..18e7d212c7de17d985943822328386ebe748a1e1 100644 (file)
@@ -362,7 +362,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
        return false;
 }
 
-static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
+                             struct iommu_iotlb_gather *, unsigned long,
                              size_t, int, arm_v7s_iopte *);
 
 static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
@@ -383,7 +384,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
                        size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
 
                        tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
-                       if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
+                       if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
                                                    sz, lvl, tblp) != sz))
                                return -EINVAL;
                } else if (ptep[i]) {
@@ -545,6 +546,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 }
 
 static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+                                     struct iommu_iotlb_gather *gather,
                                      unsigned long iova, size_t size,
                                      arm_v7s_iopte blk_pte,
                                      arm_v7s_iopte *ptep)
@@ -581,14 +583,15 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
                        return 0;
 
                tablep = iopte_deref(pte, 1);
-               return __arm_v7s_unmap(data, iova, size, 2, tablep);
+               return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
        }
 
-       io_pgtable_tlb_add_page(&data->iop, iova, size);
+       io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
        return size;
 }
 
 static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+                             struct iommu_iotlb_gather *gather,
                              unsigned long iova, size_t size, int lvl,
                              arm_v7s_iopte *ptep)
 {
@@ -647,7 +650,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                                 */
                                smp_wmb();
                        } else {
-                               io_pgtable_tlb_add_page(iop, iova, blk_size);
+                               io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
                        }
                        iova += blk_size;
                }
@@ -657,12 +660,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                 * Insert a table at the next level to map the old region,
                 * minus the part we want to unmap
                 */
-               return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
+               return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
+                                              ptep);
        }
 
        /* Keep on walkin' */
        ptep = iopte_deref(pte[0], lvl);
-       return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+       return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
 }
 
 static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
@@ -673,7 +677,7 @@ static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        if (WARN_ON(upper_32_bits(iova)))
                return 0;
 
-       return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+       return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
 }
 
 static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
@@ -808,7 +812,8 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+                              unsigned long iova, size_t granule, void *cookie)
 {
        dummy_tlb_flush(iova, granule, granule, cookie);
 }
index 325430f8a0a134017046c233628ce2a32d87e2bb..4c91359057c53d906ee5496396e28595c2153905 100644 (file)
@@ -289,6 +289,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                              struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, int lvl,
                               arm_lpae_iopte *ptep);
 
@@ -334,8 +335,10 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
                tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-               if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+               if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
+                       WARN_ON(1);
                        return -EINVAL;
+               }
        }
 
        __arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
@@ -536,6 +539,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 }
 
 static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+                                      struct iommu_iotlb_gather *gather,
                                       unsigned long iova, size_t size,
                                       arm_lpae_iopte blk_pte, int lvl,
                                       arm_lpae_iopte *ptep)
@@ -581,14 +585,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
                tablep = iopte_deref(pte, data);
        } else if (unmap_idx >= 0) {
-               io_pgtable_tlb_add_page(&data->iop, iova, size);
+               io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
                return size;
        }
 
-       return __arm_lpae_unmap(data, iova, size, lvl, tablep);
+       return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                              struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, int lvl,
                               arm_lpae_iopte *ptep)
 {
@@ -622,7 +627,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                         */
                        smp_wmb();
                } else {
-                       io_pgtable_tlb_add_page(iop, iova, size);
+                       io_pgtable_tlb_add_page(iop, gather, iova, size);
                }
 
                return size;
@@ -631,13 +636,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                 * Insert a table at the next level to map the old region,
                 * minus the part we want to unmap
                 */
-               return arm_lpae_split_blk_unmap(data, iova, size, pte,
+               return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
                                                lvl + 1, ptep);
        }
 
        /* Keep on walkin' */
        ptep = iopte_deref(pte, data);
-       return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+       return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
 }
 
 static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
@@ -650,7 +655,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
                return 0;
 
-       return __arm_lpae_unmap(data, iova, size, lvl, ptep);
+       return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
 }
 
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -1074,7 +1079,8 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+                              unsigned long iova, size_t granule, void *cookie)
 {
        dummy_tlb_flush(iova, granule, granule, cookie);
 }
index 8a0dcaf0a9e988bb5ccda0dcc66037c923bcc891..4c0be5b75c284dbd530a78647c2c374d880f7cc0 100644 (file)
@@ -180,7 +180,8 @@ static void __flush_iotlb_leaf(unsigned long iova, size_t size,
        __flush_iotlb_range(iova, size, granule, true, cookie);
 }
 
-static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie)
+static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
+                              unsigned long iova, size_t granule, void *cookie)
 {
        __flush_iotlb_range(iova, granule, granule, true, cookie);
 }
index b73cffd63262cf8bdac0d6f3246b04960b02bc40..0827d51936faeaeeb08deed989f0b8025538e12b 100644 (file)
@@ -202,7 +202,8 @@ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
        mtk_iommu_tlb_sync(cookie);
 }
 
-static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule,
+static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
+                                           unsigned long iova, size_t granule,
                                            void *cookie)
 {
        mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
index 48b288ef74b413442e0bbd5cd3d19da98c569704..eac760cdbb28d069ea7e409b19bd599b5577227f 100644 (file)
@@ -178,7 +178,8 @@ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
        qcom_iommu_tlb_sync(cookie);
 }
 
-static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule,
+static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+                                   unsigned long iova, size_t granule,
                                    void *cookie)
 {
        qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
index fe27d93c8ad9e59f4b1b49d2f02a49673d193c95..6b1b8be3ebec5d60e53bf6e0e9aa69c127ab7a5e 100644 (file)
@@ -28,10 +28,10 @@ enum io_pgtable_fmt {
  * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
  *                  address range.
  * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
- *                  single page. This function exists purely as an optimisation
- *                  for IOMMUs that cannot batch TLB invalidation operations
- *                  efficiently and are therefore better suited to issuing them
- *                  early rather than deferring them until iommu_tlb_sync().
+ *                  single page.  IOMMUs that cannot batch TLB invalidation
+ *                  operations efficiently will typically issue them here, but
+ *                  others may decide to update the iommu_iotlb_gather structure
+ *                  and defer the invalidation until iommu_tlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -42,7 +42,8 @@ struct iommu_flush_ops {
                               void *cookie);
        void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
                               void *cookie);
-       void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
+       void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+                            unsigned long iova, size_t granule, void *cookie);
 };
 
 /**
@@ -209,11 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
 }
 
 static inline void
-io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+                       struct iommu_iotlb_gather *gather, unsigned long iova,
                        size_t granule)
 {
        if (iop->cfg.tlb->tlb_add_page)
-               iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
+               iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
 }
 
 /**