vfio/type1: Adopt fast IOTLB flush interface when unmapping IOVAs
author	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
	Wed, 21 Mar 2018 18:46:19 +0000 (12:46 -0600)
committer	Alex Williamson <alex.williamson@redhat.com>
	Wed, 21 Mar 2018 18:46:19 +0000 (12:46 -0600)
VFIO IOMMU type1 currently unmaps IOVA pages synchronously, which requires
an IOTLB flush for every unmapping. This results in large IOTLB flushing
overhead when a pass-through device has a large number of mapped IOVAs.
This can be avoided by using the new fast IOTLB flushing interface, which
batches unmapped ranges and defers the flush to a single sync.

Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
[aw - use LIST_HEAD]
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 45657e2b1ff77b813b546239dd11ee97dfb2fcb4..3c082451ab1a00da8577978a61276c0341930e3d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -102,6 +102,13 @@ struct vfio_pfn {
        atomic_t                ref_count;
 };
 
+struct vfio_regions {
+       struct list_head list;
+       dma_addr_t iova;
+       phys_addr_t phys;
+       size_t len;
+};
+
 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)        \
                                        (!list_empty(&iommu->domain_list))
 
@@ -660,11 +667,102 @@ unpin_exit:
        return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
+static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
+                               struct list_head *regions)
+{
+       long unlocked = 0;
+       struct vfio_regions *entry, *next;
+
+       iommu_tlb_sync(domain->domain);
+
+       list_for_each_entry_safe(entry, next, regions, list) {
+               unlocked += vfio_unpin_pages_remote(dma,
+                                                   entry->iova,
+                                                   entry->phys >> PAGE_SHIFT,
+                                                   entry->len >> PAGE_SHIFT,
+                                                   false);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+
+       cond_resched();
+
+       return unlocked;
+}
+
+/*
+ * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
+ * Therefore, when using the IOTLB flush sync interface, VFIO needs to
+ * keep track of these regions (currently using a list).
+ *
+ * This value specifies the maximum number of regions for each IOTLB
+ * flush sync.
+ */
+#define VFIO_IOMMU_TLB_SYNC_MAX                512
+
+static size_t unmap_unpin_fast(struct vfio_domain *domain,
+                              struct vfio_dma *dma, dma_addr_t *iova,
+                              size_t len, phys_addr_t phys, long *unlocked,
+                              struct list_head *unmapped_list,
+                              int *unmapped_cnt)
+{
+       size_t unmapped = 0;
+       struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+       if (entry) {
+               unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+
+               if (!unmapped) {
+                       kfree(entry);
+               } else {
+                       iommu_tlb_range_add(domain->domain, *iova, unmapped);
+                       entry->iova = *iova;
+                       entry->phys = phys;
+                       entry->len  = unmapped;
+                       list_add_tail(&entry->list, unmapped_list);
+
+                       *iova += unmapped;
+                       (*unmapped_cnt)++;
+               }
+       }
+
+       /*
+        * Sync if the number of fast-unmap regions hits the limit
+        * or in case of errors.
+        */
+       if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
+               *unlocked += vfio_sync_unpin(dma, domain,
+                                            unmapped_list);
+               *unmapped_cnt = 0;
+       }
+
+       return unmapped;
+}
+
+static size_t unmap_unpin_slow(struct vfio_domain *domain,
+                              struct vfio_dma *dma, dma_addr_t *iova,
+                              size_t len, phys_addr_t phys,
+                              long *unlocked)
+{
+       size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+
+       if (unmapped) {
+               *unlocked += vfio_unpin_pages_remote(dma, *iova,
+                                                    phys >> PAGE_SHIFT,
+                                                    unmapped >> PAGE_SHIFT,
+                                                    false);
+               *iova += unmapped;
+               cond_resched();
+       }
+       return unmapped;
+}
+
 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                             bool do_accounting)
 {
        dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
        struct vfio_domain *domain, *d;
+       LIST_HEAD(unmapped_region_list);
+       int unmapped_region_cnt = 0;
        long unlocked = 0;
 
        if (!dma->size)
@@ -710,20 +808,26 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                                break;
                }
 
-               unmapped = iommu_unmap(domain->domain, iova, len);
-               if (WARN_ON(!unmapped))
-                       break;
-
-               unlocked += vfio_unpin_pages_remote(dma, iova,
-                                                   phys >> PAGE_SHIFT,
-                                                   unmapped >> PAGE_SHIFT,
-                                                   false);
-               iova += unmapped;
-
-               cond_resched();
+               /*
+                * First, try the fast unmap/unpin. If that fails, fall
+                * back to the slow unmap/unpin path.
+                */
+               unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+                                           &unlocked, &unmapped_region_list,
+                                           &unmapped_region_cnt);
+               if (!unmapped) {
+                       unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+                                                   phys, &unlocked);
+                       if (WARN_ON(!unmapped))
+                               break;
+               }
        }
 
        dma->iommu_mapped = false;
+
+       if (unmapped_region_cnt)
+               unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+
        if (do_accounting) {
                vfio_lock_acct(dma->task, -unlocked, NULL);
                return 0;
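
For reference, and as an assumption about the IOMMU core rather than part
of this patch: in the v4.16-era include/linux/iommu.h, the three calls used
above are thin wrappers around optional iommu_ops callbacks. Abridged:

    struct iommu_ops {
            /* ... */
            void (*flush_iotlb_all)(struct iommu_domain *domain);
            void (*iotlb_range_add)(struct iommu_domain *domain,
                                    unsigned long iova, size_t size);
            void (*iotlb_sync)(struct iommu_domain *domain);
            /* ... */
    };

Since all three callbacks are optional, the wrappers degrade to no-ops on
drivers that have not been converted; by design of the interface, such
drivers continue to flush from within their own unmap path, so the
batching here is purely an optimization.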