drm/amdgpu: simplify VM update tracking a bit
author Christian König <christian.koenig@amd.com>
Tue, 8 Mar 2022 14:06:49 +0000 (15:06 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 25 Mar 2022 16:40:26 +0000 (12:40 -0400)
Store the 64bit sequence directly. Makes it simpler to use and saves a bit
of fence reference counting overhead.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h

index 92a70fb57fa3a6e3927f3ae4a59167791f51fc6b..4f1399573107f94bda63bfb0b80e196c82b71515 100644 (file)
@@ -276,19 +276,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        uint64_t fence_context = adev->fence_context + ring->idx;
-       struct dma_fence *updates = sync->last_vm_update;
        bool needs_flush = vm->use_cpu_for_update;
-       int r = 0;
+       uint64_t updates = sync->last_vm_update;
+       int r;
 
        *id = vm->reserved_vmid[vmhub];
-       if (updates && (*id)->flushed_updates &&
-           updates->context == (*id)->flushed_updates->context &&
-           !dma_fence_is_later(updates, (*id)->flushed_updates))
-               updates = NULL;
-
        if ((*id)->owner != vm->immediate.fence_context ||
-           job->vm_pd_addr != (*id)->pd_gpu_addr ||
-           updates || !(*id)->last_flush ||
+           (*id)->pd_gpu_addr != job->vm_pd_addr ||
+           (*id)->flushed_updates < updates ||
+           !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
             !dma_fence_is_signaled((*id)->last_flush))) {
                struct dma_fence *tmp;
@@ -302,8 +298,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
                if (tmp) {
                        *id = NULL;
-                       r = amdgpu_sync_fence(sync, tmp);
-                       return r;
+                       return amdgpu_sync_fence(sync, tmp);
                }
                needs_flush = true;
        }
@@ -315,10 +310,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
        if (r)
                return r;
 
-       if (updates) {
-               dma_fence_put((*id)->flushed_updates);
-               (*id)->flushed_updates = dma_fence_get(updates);
-       }
+       (*id)->flushed_updates = updates;
        job->vm_needs_flush = needs_flush;
        return 0;
 }
@@ -346,7 +338,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
-       struct dma_fence *updates = sync->last_vm_update;
+       uint64_t updates = sync->last_vm_update;
        int r;
 
        job->vm_needs_flush = vm->use_cpu_for_update;
@@ -354,7 +346,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
        /* Check if we can use a VMID already assigned to this VM */
        list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
                bool needs_flush = vm->use_cpu_for_update;
-               struct dma_fence *flushed;
 
                /* Check all the prerequisites to using this VMID */
                if ((*id)->owner != vm->immediate.fence_context)
@@ -368,8 +359,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                     !dma_fence_is_signaled((*id)->last_flush)))
                        needs_flush = true;
 
-               flushed  = (*id)->flushed_updates;
-               if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+               if ((*id)->flushed_updates < updates)
                        needs_flush = true;
 
                if (needs_flush && !adev->vm_manager.concurrent_flush)
@@ -382,11 +372,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                if (r)
                        return r;
 
-               if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
-                       dma_fence_put((*id)->flushed_updates);
-                       (*id)->flushed_updates = dma_fence_get(updates);
-               }
-
+               (*id)->flushed_updates = updates;
                job->vm_needs_flush |= needs_flush;
                return 0;
        }
@@ -432,8 +418,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                        goto error;
 
                if (!id) {
-                       struct dma_fence *updates = sync->last_vm_update;
-
                        /* Still no ID to use? Then use the idle one found earlier */
                        id = idle;
 
@@ -442,8 +426,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                        if (r)
                                goto error;
 
-                       dma_fence_put(id->flushed_updates);
-                       id->flushed_updates = dma_fence_get(updates);
+                       id->flushed_updates = sync->last_vm_update;
                        job->vm_needs_flush = true;
                }
 
@@ -610,7 +593,6 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
                        struct amdgpu_vmid *id = &id_mgr->ids[j];
 
                        amdgpu_sync_free(&id->active);
-                       dma_fence_put(id->flushed_updates);
                        dma_fence_put(id->last_flush);
                        dma_fence_put(id->pasid_mapping);
                }
index 0c3b4fa1f93603bcaf9692a94c6db45ddeae2c78..06c8a0034fa5229b183c545254daba29fdc1210f 100644 (file)
@@ -47,7 +47,7 @@ struct amdgpu_vmid {
 
        uint64_t                pd_gpu_addr;
        /* last flushed PD/PT update */
-       struct dma_fence        *flushed_updates;
+       uint64_t                flushed_updates;
 
        uint32_t                current_gpu_reset_count;
 
index 40e06745fae98a8f3e5b8baabafaa48a5b5e3a52..05e1af9998a331b57c35f774c32bdbc3dc23fb03 100644 (file)
@@ -51,7 +51,7 @@ static struct kmem_cache *amdgpu_sync_slab;
 void amdgpu_sync_create(struct amdgpu_sync *sync)
 {
        hash_init(sync->fences);
-       sync->last_vm_update = NULL;
+       sync->last_vm_update = 0;
 }
 
 /**
@@ -184,7 +184,7 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
        if (!fence)
                return 0;
 
-       amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+       sync->last_vm_update = max(sync->last_vm_update, fence->seqno);
        return amdgpu_sync_fence(sync, fence);
 }
 
@@ -376,8 +376,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
                }
        }
 
-       dma_fence_put(clone->last_vm_update);
-       clone->last_vm_update = dma_fence_get(source->last_vm_update);
+       clone->last_vm_update = source->last_vm_update;
 
        return 0;
 }
@@ -419,8 +418,6 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }
-
-       dma_fence_put(sync->last_vm_update);
 }
 
 /**
index 7c0fe20c470d49c6787b4547397adca912eb9374..876c1ee8869c1048bce856a4758d61b6f4a68c90 100644 (file)
@@ -43,7 +43,7 @@ enum amdgpu_sync_mode {
  */
 struct amdgpu_sync {
        DECLARE_HASHTABLE(fences, 4);
-       struct dma_fence        *last_vm_update;
+       uint64_t        last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);