iommu/vt-d: Add __iommu_flush_iotlb_psi()
author Yi Liu <yi.l.liu@intel.com>
Mon, 19 Feb 2024 11:15:53 +0000 (19:15 +0800)
committer Joerg Roedel <jroedel@suse.de>
Wed, 21 Feb 2024 09:28:43 +0000 (10:28 +0100)
Add __iommu_flush_iotlb_psi() to do the PSI IOTLB flush with a
caller-supplied DID rather than calculating the DID inside the helper.

This is useful when flushing caches for a parent domain, which reuses
the DIDs of its nested domains.
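
As an illustration of the kind of caller this enables, here is a minimal
sketch assuming a follow-up patch links nested (stage-1) domains to their
parent. The s1_lock, s1_domains and s2_link names are hypothetical; only
__iommu_flush_iotlb_psi(), iommu_array and struct iommu_domain_info come
from the driver:

  static void parent_domain_flush_iotlb_psi(struct dmar_domain *domain,
                                            unsigned long pfn,
                                            unsigned int pages, int ih)
  {
          struct dmar_domain *s1_domain;

          /* Hypothetical list of nested domains hanging off the parent. */
          spin_lock(&domain->s1_lock);
          list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
                  struct iommu_domain_info *info;
                  unsigned long i;

                  /* Flush once per IOMMU/DID the nested domain reuses. */
                  xa_for_each(&s1_domain->iommu_array, i, info)
                          __iommu_flush_iotlb_psi(info->iommu, info->did,
                                                  pfn, pages, ih);
          }
          spin_unlock(&domain->s1_lock);
  }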

Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240208082307.15759-3-yi.l.liu@intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
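
The PSI mask widening in the hunk below can be checked in isolation. The
following is a compilable userspace sketch of the same arithmetic, with
__builtin_ctzl() standing in for the kernel's __ffs() and 64 assumed for
BITS_PER_LONG:

  #include <stdio.h>

  static unsigned int psi_mask(unsigned long pfn, unsigned int pages)
  {
          unsigned int aligned_pages = 1, mask = 0;
          unsigned long bitmask;

          /* Open-coded __roundup_pow_of_two()/ilog2() pair. */
          while (aligned_pages < pages) {
                  aligned_pages <<= 1;
                  mask++;
          }
          bitmask = aligned_pages - 1;

          /* Unaligned base: widen the mask until it covers end_pfn too. */
          if (bitmask & pfn) {
                  unsigned long end_pfn = pfn + pages - 1;
                  unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

                  mask = shared_bits ? __builtin_ctzl(shared_bits) : 64;
          }
          return mask;
  }

  int main(void)
  {
          /* pfn 3, 2 pages: unaligned, mask widens to 3 (flushes pfns 0-7). */
          printf("mask=%u\n", psi_mask(3, 2));
          /* pfn 4, 4 pages: already aligned, mask stays ilog2(4) = 2. */
          printf("mask=%u\n", psi_mask(4, 4));
          return 0;
  }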
drivers/iommu/intel/iommu.c

index e393c62776f306f17e752f4d8b545cd765c59451..dbdb8366c42ae638c1c5dafedf44594e73014860 100644
@@ -1368,6 +1368,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
        spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+                                   unsigned long pfn, unsigned int pages,
+                                   int ih)
+{
+       unsigned int aligned_pages = __roundup_pow_of_two(pages);
+       unsigned long bitmask = aligned_pages - 1;
+       unsigned int mask = ilog2(aligned_pages);
+       u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+       /*
+        * PSI masks the low order bits of the base address. If the
+        * address isn't aligned to the mask, then compute a mask value
+        * needed to ensure the target range is flushed.
+        */
+       if (unlikely(bitmask & pfn)) {
+               unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+               /*
+                * Since end_pfn <= pfn + bitmask, the only way bits
+                * higher than bitmask can differ in pfn and end_pfn is
+                * by carrying. This means after masking out bitmask,
+                * high bits starting with the first set bit in
+                * shared_bits are all equal in both pfn and end_pfn.
+                */
+               shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+               mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+       }
+
+       /*
+        * Fallback to domain selective flush if no PSI support or
+        * the size is too big.
+        */
+       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+               iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                        DMA_TLB_DSI_FLUSH);
+       else
+               iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+                                        DMA_TLB_PSI_FLUSH);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                  struct dmar_domain *domain,
                                  unsigned long pfn, unsigned int pages,
@@ -1384,42 +1424,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
        if (ih)
                ih = 1 << 6;
 
-       if (domain->use_first_level) {
+       if (domain->use_first_level)
                domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
-       } else {
-               unsigned long bitmask = aligned_pages - 1;
-
-               /*
-                * PSI masks the low order bits of the base address. If the
-                * address isn't aligned to the mask, then compute a mask value
-                * needed to ensure the target range is flushed.
-                */
-               if (unlikely(bitmask & pfn)) {
-                       unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
-                       /*
-                        * Since end_pfn <= pfn + bitmask, the only way bits
-                        * higher than bitmask can differ in pfn and end_pfn is
-                        * by carrying. This means after masking out bitmask,
-                        * high bits starting with the first set bit in
-                        * shared_bits are all equal in both pfn and end_pfn.
-                        */
-                       shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
-                       mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
-               }
-
-               /*
-                * Fallback to domain selective flush if no PSI support or
-                * the size is too big.
-                */
-               if (!cap_pgsel_inv(iommu->cap) ||
-                   mask > cap_max_amask_val(iommu->cap))
-                       iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                                       DMA_TLB_DSI_FLUSH);
-               else
-                       iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-                                                       DMA_TLB_PSI_FLUSH);
-       }
+       else
+               __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
 
        /*
         * In caching mode, changes of pages from non-present to present require