drm/lima: use scheduler dependency tracking
author Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 5 Aug 2021 10:46:53 +0000 (12:46 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 30 Aug 2021 08:58:20 +0000 (10:58 +0200)
Nothing special going on here.

Aside from reviewing the code, it seems like drm_sched_job_arm() should
be moved into lima_sched_context_queue_task() and put under some mutex
together with drm_sched_entity_push_job(). See the kerneldoc for
drm_sched_entity_push_job().
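
For illustration only (not part of this patch), a rough sketch of that
suggestion. The per-context queue_lock is made up, and
drm_sched_entity_push_job() is shown with the entity argument it took
at the time of this series:

  struct dma_fence *
  lima_sched_context_queue_task(struct lima_sched_context *context,
                                struct lima_sched_task *task)
  {
          struct dma_fence *fence;

          /* Hypothetical lock; does not exist in lima today. */
          mutex_lock(&context->queue_lock);
          /* Arm the job first so the scheduler fences are set up. */
          drm_sched_job_arm(&task->base);
          fence = dma_fence_get(&task->base.s_fence->finished);
          drm_sched_entity_push_job(&task->base, &context->base);
          mutex_unlock(&context->queue_lock);

          return fence;
  }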

v2: Rebase on top of the renamed functions for adding dependencies.

Reviewed-by: Qiang Yu <yuq825@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Qiang Yu <yuq825@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: lima@lists.freedesktop.org
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Link: https://patchwork.freedesktop.org/patch/msgid/20210805104705.862416-9-daniel.vetter@ffwll.ch
drivers/gpu/drm/lima/lima_gem.c
drivers/gpu/drm/lima/lima_sched.c
drivers/gpu/drm/lima/lima_sched.h
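
The hunks below drop lima's private dependency xarray and switch to the
common scheduler helpers. A driver-agnostic sketch of the resulting
submit-side pattern follows (driver_add_deps and its parameters are
illustrative, not actual lima code):

  static int driver_add_deps(struct drm_sched_job *job,
                             struct drm_gem_object *obj,
                             struct dma_fence *in_fence,
                             bool write)
  {
          int ret;

          /* Explicit sync: record a fence handed in by userspace. */
          if (in_fence) {
                  ret = drm_sched_job_add_dependency(job, in_fence);
                  if (ret)
                          return ret;
          }

          /* Implicit sync: collect the relevant fences from the BO's dma_resv. */
          return drm_sched_job_add_implicit_dependencies(job, obj, write);
  }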

diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index c528f40981bbcb3a66796f7eb706aa019333ba20..640acc060467c647c53c5f53535b3a6bf1284455 100644
@@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
        if (explicit)
                return 0;
 
-       return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
+       return drm_sched_job_add_implicit_dependencies(&task->base,
+                                                      &bo->base.base,
+                                                      write);
 }
 
 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
                if (err)
                        return err;
 
-               err = drm_gem_fence_array_add(&submit->task->deps, fence);
+               err = drm_sched_job_add_dependency(&submit->task->base, fence);
                if (err) {
                        dma_fence_put(fence);
                        return err;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index e968b5a8f0b05c962ff0cd9420aebc0db6c19009..99d5f6f1a8829e14ba2bd719fc30d38b5b2f5833 100644
@@ -134,24 +134,15 @@ int lima_sched_task_init(struct lima_sched_task *task,
        task->num_bos = num_bos;
        task->vm = lima_vm_get(vm);
 
-       xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
-
        return 0;
 }
 
 void lima_sched_task_fini(struct lima_sched_task *task)
 {
-       struct dma_fence *fence;
-       unsigned long index;
        int i;
 
        drm_sched_job_cleanup(&task->base);
 
-       xa_for_each(&task->deps, index, fence) {
-               dma_fence_put(fence);
-       }
-       xa_destroy(&task->deps);
-
        if (task->bos) {
                for (i = 0; i < task->num_bos; i++)
                        drm_gem_object_put(&task->bos[i]->base.base);
@@ -186,17 +177,6 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
        return fence;
 }
 
-static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
-                                              struct drm_sched_entity *entity)
-{
-       struct lima_sched_task *task = to_lima_task(job);
-
-       if (!xa_empty(&task->deps))
-               return xa_erase(&task->deps, task->last_dep++);
-
-       return NULL;
-}
-
 static int lima_pm_busy(struct lima_device *ldev)
 {
        int ret;
@@ -472,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job)
 }
 
 static const struct drm_sched_backend_ops lima_sched_ops = {
-       .dependency = lima_sched_dependency,
        .run_job = lima_sched_run_job,
        .timedout_job = lima_sched_timedout_job,
        .free_job = lima_sched_free_job,
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index ac70006b0e261d717421101c27b2c9b0305e6b57..6a11764d87b389ad24f10746e2de7f2ff3126d2d 100644
@@ -23,9 +23,6 @@ struct lima_sched_task {
        struct lima_vm *vm;
        void *frame;
 
-       struct xarray deps;
-       unsigned long last_dep;
-
        struct lima_bo **bos;
        int num_bos;