drm/ttm: optimize the pool shrinker a bit v2
Author:     Christian König <christian.koenig@amd.com>
AuthorDate: Thu, 1 Apr 2021 13:45:33 +0000 (15:45 +0200)
Commit:     Christian König <christian.koenig@amd.com>
CommitDate: Fri, 27 Aug 2021 07:55:39 +0000 (09:55 +0200)
Switch back to using a spinlock by moving the IOMMU unmap outside
of the locked region.

This avoids lock contention, especially while freeing pages.

v2: Add a comment explaining why we need synchronize_shrinkers().

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210820120528.81114-3-christian.koenig@amd.com
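The core of the change is a familiar lock-narrowing pattern: take the victim off the shared list while holding the lock, then do the expensive free after dropping it. Below is a minimal, self-contained sketch of that pattern; victim, victim_list and expensive_free() are illustrative names, not the TTM API, and the real code additionally uses a per-pool-type lock in ttm_pool_type_take().

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct victim {
	struct list_head node;
	/* imagine page pointers, DMA mappings, ... */
};

static DEFINE_SPINLOCK(victim_lock);	/* protects victim_list only */
static LIST_HEAD(victim_list);

/*
 * Stand-in for the expensive part: in ttm_pool_free_page() this can
 * include dma_unmap_page(), which may trigger an IOMMU TLB flush.
 */
static void expensive_free(struct victim *v)
{
	kfree(v);
}

static void shrink_one(void)
{
	struct victim *v;

	/* The critical section covers only the list manipulation... */
	spin_lock(&victim_lock);
	v = list_first_entry_or_null(&victim_list, struct victim, node);
	if (v)
		list_del(&v->node);
	spin_unlock(&victim_lock);

	/* ...so the slow free runs without the lock held and other
	 * CPUs can shrink or refill concurrently. */
	if (v)
		expensive_free(v);
}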
drivers/gpu/drm/ttm/ttm_pool.c

index cb38b1a17b09852d1a509964b5608eebb47129fc..af1b4136962611019a3e9d64cd365cd24b5a8524 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 
-static struct mutex shrinker_lock;
+static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker mm_shrinker;
 
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
        spin_lock_init(&pt->lock);
        INIT_LIST_HEAD(&pt->pages);
 
-       mutex_lock(&shrinker_lock);
+       spin_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
-       mutex_unlock(&shrinker_lock);
+       spin_unlock(&shrinker_lock);
 }
 
 /* Remove a pool_type from the global shrinker list and free all pages */
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
        struct page *p;
 
-       mutex_lock(&shrinker_lock);
+       spin_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
-       mutex_unlock(&shrinker_lock);
+       spin_unlock(&shrinker_lock);
 
        while ((p = ttm_pool_type_take(pt)))
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
 static unsigned int ttm_pool_shrink(void)
 {
        struct ttm_pool_type *pt;
-       unsigned int num_freed;
+       unsigned int num_pages;
        struct page *p;
 
-       mutex_lock(&shrinker_lock);
+       spin_lock(&shrinker_lock);
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+       list_move_tail(&pt->shrinker_list, &shrinker_list);
+       spin_unlock(&shrinker_lock);
 
        p = ttm_pool_type_take(pt);
        if (p) {
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
-               num_freed = 1 << pt->order;
+               num_pages = 1 << pt->order;
        } else {
-               num_freed = 0;
+               num_pages = 0;
        }
 
-       list_move_tail(&pt->shrinker_list, &shrinker_list);
-       mutex_unlock(&shrinker_lock);
-
-       return num_freed;
+       return num_pages;
 }
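Note the reordering inside ttm_pool_shrink(): the mutex version rotated the pool type to the list tail only after the free, while the spinlock version does the list_move_tail() up front so the lock can be dropped before ttm_pool_type_take() and ttm_pool_free_page() run. Round-robin fairness across pool types is preserved, and the potentially expensive free (including any IOMMU unmap) no longer happens under the global lock.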
 
 /* Return the allocation order for a page */
@@ -530,6 +529,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
                        for (j = 0; j < MAX_ORDER; ++j)
                                ttm_pool_type_fini(&pool->caching[i].orders[j]);
        }
+
+       /* We removed the pool types from the LRU, but we need to also make sure
+        * that no shrinker is concurrently freeing pages from the pool.
+        */
+       synchronize_shrinkers();
 }
 
 /* As long as pages are available make sure to release at least one */
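The synchronize_shrinkers() call is needed precisely because the shrinker now dereferences the pool type outside of the lock. A hypothetical interleaving, constructed from the code above rather than an observed trace, that the call prevents:

   shrinker (ttm_pool_shrink)          ttm_pool_fini()
   --------------------------          ---------------
   spin_lock(&shrinker_lock);
   pt = list_first_entry(...);
   list_move_tail(...);
   spin_unlock(&shrinker_lock);
                                       ttm_pool_type_fini(pt);
                                         (pt unlinked, pages drained)
                                       pool memory freed by the driver
   p = ttm_pool_type_take(pt);   <---- use-after-free

With the old mutex the entire shrink ran inside the critical section, so the list_del() in ttm_pool_type_fini() was sufficient; with the spinlock, ttm_pool_fini() must additionally wait until every in-flight shrinker invocation has returned, which is what synchronize_shrinkers() provides.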
@@ -604,7 +608,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
 {
        ttm_pool_debugfs_header(m);
 
-       mutex_lock(&shrinker_lock);
+       spin_lock(&shrinker_lock);
        seq_puts(m, "wc\t:");
        ttm_pool_debugfs_orders(global_write_combined, m);
        seq_puts(m, "uc\t:");
@@ -613,7 +617,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
        ttm_pool_debugfs_orders(global_dma32_write_combined, m);
        seq_puts(m, "uc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_uncached, m);
-       mutex_unlock(&shrinker_lock);
+       spin_unlock(&shrinker_lock);
 
        ttm_pool_debugfs_footer(m);
 
@@ -640,7 +644,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 
        ttm_pool_debugfs_header(m);
 
-       mutex_lock(&shrinker_lock);
+       spin_lock(&shrinker_lock);
        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
                seq_puts(m, "DMA ");
                switch (i) {
@@ -656,7 +660,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
                }
                ttm_pool_debugfs_orders(pool->caching[i].orders, m);
        }
-       mutex_unlock(&shrinker_lock);
+       spin_unlock(&shrinker_lock);
 
        ttm_pool_debugfs_footer(m);
        return 0;
@@ -693,7 +697,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
        if (!page_pool_size)
                page_pool_size = num_pages;
 
-       mutex_init(&shrinker_lock);
+       spin_lock_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);
 
        for (i = 0; i < MAX_ORDER; ++i) {