iommu/vt-d: Avoid duplicate removing in __domain_mapping()
author    Longpeng(Mike) <longpeng2@huawei.com>
          Thu, 14 Oct 2021 05:38:39 +0000 (13:38 +0800)
committer Joerg Roedel <jroedel@suse.de>
          Mon, 18 Oct 2021 10:31:48 +0000 (12:31 +0200)
__domain_mapping() always removes the pages in the range from
'iov_pfn' to 'end_pfn', but 'end_pfn' is always the last pfn of
the whole range that the caller wants to map.

This introduces many duplicated removals and makes the map
operation take too long. For example (a sketch of the old
arithmetic follows the trace):

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x7e617ff
    iov_pfn: 0x140000, end_pfn: 0x7e617ff
    iov_pfn: 0x180000, end_pfn: 0x7e617ff
    iov_pfn: 0x1c0000, end_pfn: 0x7e617ff
    iov_pfn: 0x200000, end_pfn: 0x7e617ff
    ...
  it takes about 50ms in total.
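
To see where the repeated 0x7e617ff comes from, here is a minimal
standalone sketch of the old arithmetic, assuming 2MiB superpages;
level_mask() below is a simplified stand-in that clears the low
9*(level-1) pfn bits, not the driver's actual helper:

  #include <stdio.h>

  /* Simplified stand-in: clears the low 9*(level-1) bits of a pfn. */
  static unsigned long level_mask(int level)
  {
          return ~0UL << (9 * (level - 1));
  }

  int main(void)
  {
          unsigned long iov_pfn = 0x100000, nr_pages = 0x7d61800;
          int largepage_lvl = 2;          /* 2MiB superpages */

          /* Old computation: always the last pfn of the whole range,
           * no matter which pte page the current iteration is in.
           */
          unsigned long end_pfn =
                  ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
          printf("end_pfn: 0x%lx\n", end_pfn);    /* 0x7e617ff */
          return 0;
  }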

We can reduce the cost by recalculating 'end_pfn' and limiting it
to the end boundary of the current pte page (a sketch of the new
computation follows the trace):

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x13ffff
    iov_pfn: 0x140000, end_pfn: 0x17ffff
    iov_pfn: 0x180000, end_pfn: 0x1bffff
    iov_pfn: 0x1c0000, end_pfn: 0x1fffff
    iov_pfn: 0x200000, end_pfn: 0x23ffff
    ...
  it only needs 9ms now.
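
A minimal standalone sketch of the new computation, assuming 2MiB
superpages (lvl_pages = 512) and a pte at the start of a fresh page
table page, so 512 pte slots remain until the next page boundary;
the constants here are simplified stand-ins, not the driver code:

  #include <stdio.h>

  int main(void)
  {
          unsigned long iov_pfn = 0x100000, nr_pages = 0x7d61800;
          unsigned long lvl_pages = 512;          /* 4KiB pages per 2MiB pte */
          unsigned long nr_pte_to_next = 512;     /* first pte in its page */
          unsigned long pages_to_remove, end_pfn;

          /* New computation: clamp the removal to the end of the
           * current pte page (open-coded min_t()).
           */
          pages_to_remove = nr_pte_to_next * lvl_pages;
          if (nr_pages < pages_to_remove)
                  pages_to_remove = nr_pages;

          end_pfn = iov_pfn + pages_to_remove - 1;
          printf("end_pfn: 0x%lx\n", end_pfn);    /* 0x13ffff */
          return 0;
  }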

This also removes a meaningless BUG_ON() in __domain_mapping().

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Tested-by: Liujunjie <liujunjie23@huawei.com>
Link: https://lore.kernel.org/r/20211008000433.1115-1-longpeng2@huawei.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20211014053839.727419-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/iommu.c
include/linux/intel-iommu.h

index 16a35669a9d0a422f32221acb0198a2c96b7843d..0bde0c8b41269ce7f6654bfb49a2115d65d3935d 100644
@@ -2479,12 +2479,17 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                return -ENOMEM;
                        first_pte = pte;
 
+                       lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
                        /* It is large page*/
                        if (largepage_lvl > 1) {
                                unsigned long end_pfn;
+                               unsigned long pages_to_remove;
 
                                pteval |= DMA_PTE_LARGE_PAGE;
-                               end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
+                               pages_to_remove = min_t(unsigned long, nr_pages,
+                                                       nr_pte_to_next_page(pte) * lvl_pages);
+                               end_pfn = iov_pfn + pages_to_remove - 1;
                                switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
@@ -2506,10 +2511,6 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        WARN_ON(1);
                }
 
-               lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
-               BUG_ON(nr_pages < lvl_pages);
-
                nr_pages -= lvl_pages;
                iov_pfn += lvl_pages;
                phys_pfn += lvl_pages;
index 52481625838c43b6c648402b8865b6f6a8a39674..69230fd695eadbef4be38c86ecce4d9bf47c0556 100644
@@ -710,6 +710,12 @@ static inline bool first_pte_in_page(struct dma_pte *pte)
        return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
 }
 
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+       return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+               (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
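
For intuition, nr_pte_to_next_page() returns how many pte slots are
left before the current page table page ends; ALIGN() of an already
page-aligned pte would return the pte itself (yielding 0), hence the
first_pte_in_page() special case returning a full page of 512 slots.
A rough standalone sketch of the same arithmetic, where struct
dma_pte and the constants are simplified stand-ins for the kernel
definitions:

  #include <stdio.h>
  #include <stdint.h>

  #define VTD_PAGE_SIZE    4096UL
  #define VTD_STRIDE_SHIFT 9              /* 512 ptes per 4KiB page */

  struct dma_pte { uint64_t val; };       /* simplified stand-in */

  static int nr_pte_to_next_page(struct dma_pte *pte)
  {
          unsigned long addr = (unsigned long)pte;
          unsigned long next = (addr + VTD_PAGE_SIZE - 1) &
                               ~(VTD_PAGE_SIZE - 1);

          /* First pte in its page: all 512 slots remain. */
          if (!(addr & (VTD_PAGE_SIZE - 1)))
                  return 1 << VTD_STRIDE_SHIFT;
          return (struct dma_pte *)next - pte;
  }

  int main(void)
  {
          /* GCC/Clang alignment attribute, for a page-aligned array. */
          static struct dma_pte page[512] __attribute__((aligned(4096)));

          printf("%d\n", nr_pte_to_next_page(&page[0]));   /* 512 */
          printf("%d\n", nr_pte_to_next_page(&page[510])); /* 2 */
          return 0;
  }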