drm/ttm: Clear all DMA mappings on demand
author Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Fri, 27 Aug 2021 20:39:08 +0000 (16:39 -0400)
committer Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Wed, 1 Sep 2021 13:15:01 +0000 (09:15 -0400)
Used by drivers that support hot unplug to tear
down all DMA/IOMMU group related dependencies
before the group is removed during device removal.
Otherwise those mappings could be accessed after
free once the last device reference held by user
space is dropped.
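
For illustration only, a minimal sketch of how a hot-unplug capable
driver might call the new helper late in its PCI remove path, after
the hardware is quiesced but before the device and its IOMMU group go
away. The my_* names are hypothetical placeholders and not part of
this patch; only ttm_device_clear_dma_mappings(), drm_dev_unplug(),
pci_get_drvdata() and pci_disable_device() are existing interfaces.

#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_device.h>

/* Hypothetical driver hot-unplug path (sketch, not part of this patch). */
static void my_driver_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *ddev = pci_get_drvdata(pdev);
	struct my_device *mdev = to_my_device(ddev);	/* hypothetical */

	/* Block further user space access to the device. */
	drm_dev_unplug(ddev);

	/* Driver specific: stop engines, wait for fences, free IRQs. */
	my_driver_hw_fini(mdev);			/* hypothetical */

	/*
	 * Unpopulate every TT backed BO so no DMA mapping outlives the
	 * device's IOMMU group; user space may still hold BO handles.
	 */
	ttm_device_clear_dma_mappings(&mdev->bdev);

	pci_disable_device(pdev);
}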

v3:
Switch to ttm_bo_get_unless_zero
Iterate bdev for pinned list
Switch to ttm_tt_unpopulate

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210827203910.5565-3-andrey.grodzovsky@amd.com
drivers/gpu/drm/ttm/ttm_device.c
include/drm/ttm/ttm_device.h

index 9eb8f54b66fcd688bf4161717cfa125a369866d3..a021d029b7300eb634c62faf16fcd59082ec4f85 100644 (file)
@@ -248,3 +248,50 @@ void ttm_device_fini(struct ttm_device *bdev)
        ttm_global_release();
 }
 EXPORT_SYMBOL(ttm_device_fini);
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+       struct ttm_resource_manager *man;
+       struct ttm_buffer_object *bo;
+       unsigned int i, j;
+
+       spin_lock(&bdev->lru_lock);
+       while (!list_empty(&bdev->pinned)) {
+               bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+               /* Take ref against racing releases once lru_lock is unlocked */
+               if (ttm_bo_get_unless_zero(bo)) {
+                       list_del_init(&bo->lru);
+                       spin_unlock(&bdev->lru_lock);
+
+                       if (bo->ttm)
+                               ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+                       ttm_bo_put(bo);
+                       spin_lock(&bdev->lru_lock);
+               }
+       }
+
+       for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+               man = ttm_manager_type(bdev, i);
+               if (!man || !man->use_tt)
+                       continue;
+
+               for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+                       while (!list_empty(&man->lru[j])) {
+                               bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
+                               if (ttm_bo_get_unless_zero(bo)) {
+                                       list_del_init(&bo->lru);
+                                       spin_unlock(&bdev->lru_lock);
+
+                                       if (bo->ttm)
+                                               ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+                                       ttm_bo_put(bo);
+                                       spin_lock(&bdev->lru_lock);
+                               }
+                       }
+               }
+       }
+       spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
index 03fb44d061e09e7ef5e2d2e3df3c16d757e4d46d..07d722950d5b2e208ffc083a1e946f47877e849d 100644 (file)
@@ -299,5 +299,6 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
                    struct drm_vma_offset_manager *vma_manager,
                    bool use_dma_alloc, bool use_dma32);
 void ttm_device_fini(struct ttm_device *bdev);
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
 
 #endif