drm/amdgpu: cleanup ctx implementation
author		Christian König <christian.koenig@amd.com>
		Fri, 13 May 2022 11:54:02 +0000 (13:54 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
		Thu, 26 May 2022 18:56:31 +0000 (14:56 -0400)
Let each context have a pointer to the ctx manager and properly
initialize the adev pointer inside the context manager.
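
As a reference, a minimal sketch of the resulting relationship, reconstructed
from the hunks below (abbreviated, not the actual header definitions):

	struct amdgpu_ctx_mgr {
		struct amdgpu_device	*adev;	/* set once in amdgpu_ctx_mgr_init() */
		struct mutex		lock;
		struct idr		ctx_handles;
	};

	struct amdgpu_ctx {
		struct kref		refcount;
		struct amdgpu_ctx_mgr	*mgr;	/* replaces the old ctx->adev pointer */
		/* ... */
	};

	/* code that used to read ctx->adev now goes through the manager */
	struct amdgpu_device *adev = ctx->mgr->adev;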

Reduce the BUG_ON() in amdgpu_ctx_add_fence() to a WARN_ON() and
directly return the sequence number instead of writing it into a parameter.
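
The call site in amdgpu_cs_submit() then shrinks to a plain assignment; a
trimmed excerpt of the first hunk below (seq is the uint64_t already declared
in that function):

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	/* the context sequence number is now the return value */
	seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
	amdgpu_cs_post_dependencies(p);

With the WARN_ON(), an unsignaled fence still sitting in the recycled slot is
reported instead of triggering a kernel BUG().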

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Shashank Sharma <shashank.sharma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e552a20048680f31f00546013a768e9dd797c354..84caab5e4d221d6f231f3cc02c39b4685762004c 100644
@@ -1252,7 +1252,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
-       amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
+       seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
        amdgpu_cs_post_dependencies(p);
 
        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c317078d1afd0d3e7cab7e8c9251e94c6f21f483..a61e4c83a545a141fa87c3e021b1e97e65e6fb2d 100644
@@ -135,9 +135,9 @@ static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_
 
 static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 {
-       struct amdgpu_device *adev = ctx->adev;
-       int32_t ctx_prio;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        unsigned int hw_prio;
+       int32_t ctx_prio;
 
        ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;
@@ -166,7 +166,7 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
                                  const u32 ring)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        struct amdgpu_ctx_entity *entity;
        struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
        unsigned num_scheds = 0;
@@ -220,10 +220,8 @@ error_free_entity:
        return r;
 }
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
-                          int32_t priority,
-                          struct drm_file *filp,
-                          struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
+                          struct drm_file *filp, struct amdgpu_ctx *ctx)
 {
        int r;
 
@@ -233,15 +231,14 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
        memset(ctx, 0, sizeof(*ctx));
 
-       ctx->adev = adev;
-
        kref_init(&ctx->refcount);
+       ctx->mgr = mgr;
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);
 
-       ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+       ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
-       ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+       ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
        ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
@@ -266,7 +263,7 @@ static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
 static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
                                        u32 *stable_pstate)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        enum amd_dpm_forced_level current_level;
 
        current_level = amdgpu_dpm_get_performance_level(adev);
@@ -294,7 +291,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
 static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
                                        u32 stable_pstate)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        enum amd_dpm_forced_level level;
        u32 current_stable_pstate;
        int r;
@@ -345,7 +342,8 @@ done:
 static void amdgpu_ctx_fini(struct kref *ref)
 {
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_ctx_mgr *mgr = ctx->mgr;
+       struct amdgpu_device *adev = mgr->adev;
        unsigned i, j, idx;
 
        if (!adev)
@@ -421,7 +419,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
        }
 
        *id = (uint32_t)r;
-       r = amdgpu_ctx_init(adev, priority, filp, ctx);
+       r = amdgpu_ctx_init(mgr, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
@@ -671,9 +669,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
        return 0;
 }
 
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
-                         struct drm_sched_entity *entity,
-                         struct dma_fence *fence, uint64_t *handle)
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+                             struct drm_sched_entity *entity,
+                             struct dma_fence *fence)
 {
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
@@ -682,8 +680,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 
        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
-       if (other)
-               BUG_ON(!dma_fence_is_signaled(other));
+       WARN_ON(other && !dma_fence_is_signaled(other));
 
        dma_fence_get(fence);
 
@@ -693,8 +690,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
        spin_unlock(&ctx->ring_lock);
 
        dma_fence_put(other);
-       if (handle)
-               *handle = seq;
+       return seq;
 }
 
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -731,7 +727,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
                                           int hw_ip,
                                           int32_t priority)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        unsigned int hw_prio;
        struct drm_gpu_scheduler **scheds = NULL;
        unsigned num_scheds;
@@ -796,8 +792,10 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
        return r;
 }
 
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+                        struct amdgpu_device *adev)
 {
+       mgr->adev = adev;
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 142f2f87d44cea617f01825b44d32beee162e5bc..681050bc828c3741675f07085f9b30a00a0e247d 100644
@@ -40,7 +40,7 @@ struct amdgpu_ctx_entity {
 
 struct amdgpu_ctx {
        struct kref                     refcount;
-       struct amdgpu_device            *adev;
+       struct amdgpu_ctx_mgr           *mgr;
        unsigned                        reset_counter;
        unsigned                        reset_counter_query;
        uint32_t                        vram_lost_counter;
@@ -70,9 +70,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity);
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
-                         struct drm_sched_entity *entity,
-                         struct dma_fence *fence, uint64_t *seq);
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+                             struct drm_sched_entity *entity,
+                             struct dma_fence *fence);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq);
@@ -85,7 +85,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity);
 
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+                        struct amdgpu_device *adev);
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 497478f8a5d384dd3f616fdda68be7d6f1b81220..801f6fa692e98c706b8389e1673e3b3e20f024ff 100644
@@ -1152,7 +1152,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        mutex_init(&fpriv->bo_list_lock);
        idr_init(&fpriv->bo_list_handles);
 
-       amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
+       amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
 
        file_priv->driver_priv = fpriv;
        goto out_suspend;