iommu/iova: Improve restart logic
author Robin Murphy <robin.murphy@arm.com>
Fri, 5 Mar 2021 16:35:23 +0000 (16:35 +0000)
committer Joerg Roedel <jroedel@suse.de>
Thu, 18 Mar 2021 10:01:09 +0000 (11:01 +0100)
When restarting after searching below the cached node fails, resetting
the start point to the anchor node is often overly pessimistic. If
allocations are made with mixed limits - particularly in the case of the
opportunistic 32-bit allocation for PCI devices - this could mean
significant time wasted walking through the whole populated upper range
just to reach the initial limit. We can improve on that by implementing
a proper tree traversal to find the first node above the relevant limit,
and set the exact start point.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/076b3484d1e5057b95d8c387c894bd6ad2514043.1614962123.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/iova.c

index c28003e1d2ee9b584dacee9efbf5e28472b1e6df..471c48dd71e761f8cf2a165ee30eeac694c2e6ed 100644 (file)
@@ -154,6 +154,43 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
                iovad->cached_node = rb_next(&free->node);
 }
 
+/*
+ * Find the first tree node whose range starts at or above @limit_pfn, i.e.
+ * the exact point from which an allocation walk bounded by @limit_pfn can
+ * begin, instead of pessimistically restarting from the anchor node.
+ */
+static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
+{
+       struct rb_node *node, *next;
+       /*
+        * Ideally what we'd like to judge here is whether limit_pfn is close
+        * enough to the highest-allocated IOVA that starting the allocation
+        * walk from the anchor node will be quicker than this initial work to
+        * find an exact starting point (especially if that ends up being the
+        * anchor node anyway). This is an incredibly crude approximation which
+        * only really helps the most likely case, but is at least trivially easy.
+        */
+       if (limit_pfn > iovad->dma_32bit_pfn)
+               return &iovad->anchor.node;
+
+       /*
+        * Descend rightwards to the root of the subtree containing the first
+        * range that ends at or above limit_pfn.
+        * NOTE(review): this assumes the rightmost node (the anchor) always
+        * satisfies pfn_hi >= limit_pfn so @node cannot become NULL — confirm
+        * against the anchor node's pfn_hi initialisation.
+        */
+       node = iovad->rbroot.rb_node;
+       while (to_iova(node)->pfn_hi < limit_pfn)
+               node = node->rb_right;
+
+search_left:
+       /* Narrow leftwards while the left child still starts above the limit. */
+       while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
+               node = node->rb_left;
+
+       /* No left subtree: @node is the in-order first candidate. */
+       if (!node->rb_left)
+               return node;
+
+       /*
+        * The left subtree exists but its root starts below the limit; an
+        * earlier candidate may still hide on its rightmost (in-order
+        * predecessor) path. If one is found, restart the left narrowing
+        * from there.
+        */
+       next = node->rb_left;
+       while (next->rb_right) {
+               next = next->rb_right;
+               if (to_iova(next)->pfn_lo >= limit_pfn) {
+                       node = next;
+                       goto search_left;
+               }
+       }
+
+       return node;
+}
+
 /* Insert the iova into domain rbtree by holding writer lock */
 static void
 iova_insert_rbtree(struct rb_root *root, struct iova *iova,
@@ -219,7 +256,7 @@ retry:
                if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
                        high_pfn = limit_pfn;
                        low_pfn = retry_pfn;
-                       curr = &iovad->anchor.node;
+                       curr = iova_find_limit(iovad, limit_pfn);
                        curr_iova = to_iova(curr);
                        goto retry;
                }