dma-buf: add DMA_RESV_USAGE_BOOKKEEP v3
author		Christian König <christian.koenig@amd.com>
		Tue, 9 Nov 2021 10:08:18 +0000 (11:08 +0100)
committer	Christian König <christian.koenig@amd.com>
		Thu, 7 Apr 2022 10:53:54 +0000 (12:53 +0200)
Add a usage for submissions that are independent of implicit sync but
still relevant for memory management.

v2: clean up the kerneldoc a bit
v3: separate the amdgpu changes from this patch

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-10-christian.koenig@amd.com
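
For illustration only (not part of the patch): a minimal sketch of how a
driver would use the new level, assuming the dma_resv_add_fence() and
dma_resv_reserve_fences() interfaces from the same series; the example_*
helpers are hypothetical.

#include <linux/dma-resv.h>
#include <linux/sched.h>

/*
 * Hypothetical helper: attach a fence that must not participate in
 * implicit synchronization, e.g. a page table update or preemption fence.
 */
static int example_add_bookkeep_fence(struct dma_resv *resv,
				      struct dma_fence *fence)
{
	int r;

	dma_resv_assert_held(resv);

	/* Make sure a fence slot is available before adding. */
	r = dma_resv_reserve_fences(resv, 1);
	if (r)
		return r;

	/* Invisible to USAGE_READ/USAGE_WRITE queries, i.e. to implicit sync. */
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
	return 0;
}

/*
 * Memory management, on the other hand, must wait for all fences,
 * bookkeeping ones included, before touching the backing store.
 */
static long example_wait_mem_idle(struct dma_resv *resv)
{
	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
				     false, MAX_SCHEDULE_TIMEOUT);
}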
14 files changed:
drivers/dma-buf/dma-resv.c
drivers/dma-buf/st-dma-resv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/qxl/qxl_debugfs.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_mn.c
drivers/gpu/drm/ttm/ttm_bo.c
include/linux/dma-resv.h

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f4860e5f2d8b5b879311f4ff094372d7f989a406..5b64aa554c36b51c76cf940d8caef85edd1afa4d 100644
@@ -520,7 +520,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
        list = NULL;
 
-       dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
+       dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, f) {
 
                if (dma_resv_iter_is_restarted(&cursor)) {
@@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
  */
 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
 {
-       static const char *usage[] = { "kernel", "write", "read" };
+       static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
 
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index 062b57d63fa65b3e749668736e0d03237062689d..8ace9e84c84534817e35c7286cf14dcefa68ce01 100644
@@ -296,7 +296,7 @@ int dma_resv(void)
        int r;
 
        spin_lock_init(&fence_lock);
-       for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
+       for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
             ++usage) {
                r = subtests(tests, (void *)(unsigned long)usage);
                if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 65998cbcd7f7bfbf7dc7bf87b006db4bcb7e640f..4ba4b54092f19cc9432b8aaf5e2f20b1bb1df69a 100644
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
        struct dma_fence *fence;
        int r;
 
-       r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence);
+       r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
        if (r)
                goto fallback;
 
@@ -139,7 +139,7 @@ fallback:
        /* Not enough memory for the delayed delete, as last resort
         * block for all the fences to complete.
         */
-       dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+       dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                              false, MAX_SCHEDULE_TIMEOUT);
        amdgpu_pasid_free(pasid);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 86f5248676b03caf4600c44763786cf1baf0cd66..b86c0b8252a53b4d1ba259820b3da4fc4d500db7 100644
@@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
        mmu_interval_set_seq(mni, cur_seq);
 
-       r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+       r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                  false, MAX_SCHEDULE_TIMEOUT);
        mutex_unlock(&adev->notifier_lock);
        if (r <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 744e144e5fc2a3fb26f59901e73260733ec6f97d..11c46b3e4c60b4d36f22252dcd9524773bd12404 100644
@@ -260,7 +260,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                return -EINVAL;
 
        /* TODO: Use DMA_RESV_USAGE_READ here */
-       dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
+       dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
                dma_fence_chain_for_each(f, f) {
                        struct dma_fence *tmp = dma_fence_chain_contained(f);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5db5066e74b4a4b94835aa3b8cd61e1c4209d1d6..49ffad312d5d5015090801028bd5678204e9ceb1 100644
@@ -1345,7 +1345,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
         * be resident to run successfully
         */
        dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
-                               DMA_RESV_USAGE_READ, f) {
+                               DMA_RESV_USAGE_BOOKKEEP, f) {
                if (amdkfd_fence_check_mm(f, current->mm))
                        return false;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a0376fd36a82d9e192f6cbf068c6cab3f03674e6..5277c10d901dcad2269dc7e39ce6edf83c64f8bf 100644
@@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
 
-       dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
+       dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
                /* Add a callback for each fence in the reservation object */
                amdgpu_vm_prt_get(adev);
                amdgpu_vm_add_prt_cb(adev, fence);
@@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                return true;
 
        /* Don't evict VM page tables while they are busy */
-       if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ))
+       if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
                return false;
 
        /* Try to block ongoing updates */
@@ -2846,7 +2846,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
        timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
-                                       DMA_RESV_USAGE_READ,
+                                       DMA_RESV_USAGE_BOOKKEEP,
                                        true, timeout);
        if (timeout <= 0)
                return timeout;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index a200d3e665737002860713b711dcd7ef00733401..4115a222a853c8400dea8d5af5ae61b86f371cdb 100644
@@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
        struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
 
 #ifdef CONFIG_LOCKDEP
-       GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) &&
+       GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
                    i915_gem_object_evictable(obj));
 #endif
        return mr && (mr->type == INTEL_MEMORY_LOCAL ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 644fe237601c2b236c6b811db31c0a3a2d714a77..094f06b4ce3359142d8cfb27d7b00c3372e9fee9 100644
@@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
                return true;
 
        /* we will unbind on next submission, still have userptr pins */
-       r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false,
+       r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
                                  MAX_SCHEDULE_TIMEOUT);
        if (r <= 0)
                drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 33e5889d660873ba788f85b823b6cfd12b0e504c..2d9ed3b945740df2cc0fecdcdc789618459374cc 100644
@@ -62,7 +62,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
                int rel = 0;
 
                dma_resv_iter_begin(&cursor, bo->tbo.base.resv,
-                                   DMA_RESV_USAGE_READ);
+                                   DMA_RESV_USAGE_BOOKKEEP);
                dma_resv_for_each_fence_unlocked(&cursor, fence) {
                        if (dma_resv_iter_is_restarted(&cursor))
                                rel = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 6616a828f40b3364971f56030c1d4b0705c51299..8c01a7f0e02705d646735c64defa3e270ed9cbac 100644
@@ -163,7 +163,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access wait for object idle */
                r = dma_resv_wait_timeout(robj->tbo.base.resv,
-                                         DMA_RESV_USAGE_READ,
+                                         DMA_RESV_USAGE_BOOKKEEP,
                                          true, 30 * HZ);
                if (!r)
                        r = -EBUSY;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 68ebeb1bdfffe5720fd2a7ba1d32aaedbbe7f27b..29fe8423bd9059f12884bed628d604e440773a00 100644
@@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
                return true;
        }
 
-       r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+       r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                  false, MAX_SCHEDULE_TIMEOUT);
        if (r <= 0)
                DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6bf3fb1c80451df0b9013810e91ae545ea90d68b..360f980c7e10e7d7ff5e3ea5cdc04198054bd0f6 100644
@@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
 
-       dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+       dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
@@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
        struct dma_resv *resv = &bo->base._resv;
        int ret;
 
-       if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ))
+       if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
                ret = 0;
        else
                ret = -EBUSY;
@@ -264,7 +264,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);
 
-               lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+               lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                                             interruptible,
                                             30 * HZ);
 
@@ -369,7 +369,7 @@ static void ttm_bo_release(struct kref *kref)
                         * fences block for the BO to become idle
                         */
                        dma_resv_wait_timeout(bo->base.resv,
-                                             DMA_RESV_USAGE_READ, false,
+                                             DMA_RESV_USAGE_BOOKKEEP, false,
                                              30 * HZ);
                }
 
@@ -380,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
                ttm_mem_io_free(bdev, bo->resource);
        }
 
-       if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) ||
+       if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
@@ -1046,13 +1046,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
        long timeout = 15 * HZ;
 
        if (no_wait) {
-               if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ))
+               if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
                        return 0;
                else
                        return -EBUSY;
        }
 
-       timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+       timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                        interruptible, timeout);
        if (timeout < 0)
                return timeout;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index a749f229ae917d226a786fabb75290ea74d161ad..1db759eacc98cc7cfe80d767f59f43f6f32afff3 100644
@@ -55,7 +55,7 @@ struct dma_resv_list;
  * This enum describes the different use cases for a dma_resv object and
  * controls which fences are returned when queried.
  *
- * An important fact is that there is the order KERNEL<WRITE<READ and
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
  * when the dma_resv object is asked for fences for one use case the fences
  * for the lower use case are returned as well.
  *
@@ -93,6 +93,17 @@ enum dma_resv_usage {
         * an implicit read dependency.
         */
        DMA_RESV_USAGE_READ,
+
+       /**
+        * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+        *
+        * This should be used by submissions which don't want to participate in
+        * implicit synchronization.
+        *
+        * The most common case are preemption fences as well as page table
+        * updates and their TLB flushes.
+        */
+       DMA_RESV_USAGE_BOOKKEEP
 };
 
 /**
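
Again for illustration (not part of the patch): because of the
KERNEL < WRITE < READ < BOOKKEEP ordering described in the kerneldoc above,
querying for a usage also returns the fences of every lower usage, so
BOOKKEEP is the level memory management uses to see everything while
implicit sync stops at READ. A sketch, with example_enumerate() hypothetical
and the reservation lock assumed held:

#include <linux/dma-resv.h>
#include <linux/printk.h>

static void example_enumerate(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* BOOKKEEP is the highest level: this sees every fence on the object. */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence)
		pr_info("mm dependency: %s\n",
			fence->ops->get_driver_name(fence));

	/* READ stops below BOOKKEEP, so implicit sync never sees those fences. */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence)
		pr_info("implicit sync dependency: %s\n",
			fence->ops->get_driver_name(fence));
}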