return ret;
 
        for (i = 0; i < job->bo_count; i++) {
-               ret = drm_gem_fence_array_add_implicit(&job->deps,
-                                                      job->bo[i], true);
+               ret = drm_sched_job_add_implicit_dependencies(&job->base,
+                                                             job->bo[i], true);
                if (ret) {
                        drm_gem_unlock_reservations(job->bo, job->bo_count,
                                                    acquire_ctx);
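
For context, not part of the diff: drm_sched_job_add_implicit_dependencies()
replaces the driver-side drm_gem_fence_array_add_implicit(). Instead of
collecting each BO's reservation-object fences into a driver-owned container,
it hangs them directly off the drm_sched_job. A minimal caller-side sketch,
using a hypothetical driver job struct (the names below are illustrative,
not v3d's):

#include <drm/gpu_scheduler.h>
#include <drm/drm_gem.h>

struct my_job {
        struct drm_sched_job base;      /* the scheduler tracks the deps */
        struct drm_gem_object **bo;     /* BOs referenced by this job */
        int bo_count;
};

/* Record every BO's implicit fences as scheduler dependencies.
 * Passing 'true' for the write argument makes the job wait on all
 * fences of the reservation object, i.e. it is treated as a writer. */
static int my_job_add_implicit_deps(struct my_job *job)
{
        int i, ret;

        for (i = 0; i < job->bo_count; i++) {
                ret = drm_sched_job_add_implicit_dependencies(&job->base,
                                                              job->bo[i],
                                                              true);
                if (ret)
                        return ret;
        }
        return 0;
}
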
 v3d_job_free(struct kref *ref)
 {
        struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
-       unsigned long index;
-       struct dma_fence *fence;
        int i;
 
        for (i = 0; i < job->bo_count; i++) {
                if (job->bo[i])
                        drm_gem_object_put(job->bo[i]);
        }
        kvfree(job->bo);
 
-       xa_for_each(&job->deps, index, fence) {
-               dma_fence_put(fence);
-       }
-       xa_destroy(&job->deps);
-
        dma_fence_put(job->irq_fence);
        dma_fence_put(job->done_fence);
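
The xarray teardown above can go because the dependency fences are no longer
driver-owned: the scheduler core releases them, either as it consumes them
before run_job or in bulk from drm_sched_job_cleanup(). Roughly what the
core's cleanup now does on the driver's behalf (a simplified sketch of the
dependency-rework behaviour, not the literal scheduler source):

/* Sketch: the core drops every dependency fence still held by the job. */
static void sketch_sched_job_cleanup(struct drm_sched_job *job)
{
        struct dma_fence *fence;
        unsigned long index;

        xa_for_each(&job->dependencies, index, fence)
                dma_fence_put(fence);
        xa_destroy(&job->dependencies);
}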
 
        if (ret < 0)
                return ret;
 
-       xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
        ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
                                 v3d_priv);
        if (ret)
                goto fail;

        ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
        if (ret == -EINVAL)
                goto fail_job;
 
-       ret = drm_gem_fence_array_add(&job->deps, in_fence);
+       ret = drm_sched_job_add_dependency(&job->base, in_fence);
        if (ret)
                goto fail_job;
 
 fail_job:
        drm_sched_job_cleanup(&job->base);
 fail:
-       xa_destroy(&job->deps);
        pm_runtime_put_autosuspend(v3d->drm.dev);
        return ret;
 }
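
One ownership rule makes both changes above safe: drm_sched_job_add_dependency()
consumes a reference to the fence on success and on failure. The in_fence from
the syncobj lookup is therefore handed straight over, and the error path needs
only drm_sched_job_cleanup(). A sketch of the two ways callers pass fences in
(hypothetical helper, not from the patch):

/* 'owned_fence': we give up our reference, the scheduler consumes it
 * even on error.  'shared_fence': still used elsewhere, so the
 * scheduler gets its own reference via dma_fence_get(). */
static int my_add_deps(struct drm_sched_job *sjob,
                       struct dma_fence *owned_fence,
                       struct dma_fence *shared_fence)
{
        int ret;

        ret = drm_sched_job_add_dependency(sjob, owned_fence);
        if (ret)
                return ret;

        return drm_sched_job_add_dependency(sjob,
                                            dma_fence_get(shared_fence));
}
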
                v3d_perfmon_get(bin->base.perfmon);
                v3d_push_job(&bin->base);
 
-               ret = drm_gem_fence_array_add(&render->base.deps,
-                                             dma_fence_get(bin->base.done_fence));
+               ret = drm_sched_job_add_dependency(&render->base.base,
+                                                  dma_fence_get(bin->base.done_fence));
                if (ret)
                        goto fail_unreserve;
        }
        if (clean_job) {
                struct dma_fence *render_fence =
                        dma_fence_get(render->base.done_fence);
-               ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+               ret = drm_sched_job_add_dependency(&clean_job->base,
+                                                  render_fence);
                if (ret)
                        goto fail_unreserve;
                clean_job->perfmon = render->base.perfmon;
        mutex_lock(&v3d->sched_lock);
        v3d_push_job(&job->base);
 
-       ret = drm_gem_fence_array_add(&clean_job->deps,
-                                     dma_fence_get(job->base.done_fence));
+       ret = drm_sched_job_add_dependency(&clean_job->base,
+                                          dma_fence_get(job->base.done_fence));
        if (ret)
                goto fail_unreserve;
 
 
  * jobs when bulk background jobs are queued up, we submit a new job
  * to the HW only when it has completed the last one, instead of
  * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
- * v3d_job_dependency() to manage the dependency between bin and
+ * drm_sched_job_add_dependency() to manage the dependency between bin and
  * render, instead of having the clients submit jobs using the HW's
  * semaphores to interlock between them.
  */
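
With the callback gone, the bin/render interlock described above is expressed
entirely at submit time: each stage's done_fence becomes a scheduler dependency
of the next stage before that stage is pushed. Condensed from the ioctl hunks
above (error handling trimmed):

/* render must not start before bin has signalled */
v3d_push_job(&bin->base);

ret = drm_sched_job_add_dependency(&render->base.base,
                                   dma_fence_get(bin->base.done_fence));
if (ret)
        goto fail_unreserve;

v3d_push_job(&render->base);
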
                v3d_perfmon_start(v3d, job->perfmon);
 }
 
-/*
- * Returns the fences that the job depends on, one by one.
- *
- * If placed in the scheduler's .dependency method, the corresponding
- * .run_job won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_job_dependency(struct drm_sched_job *sched_job,
-                  struct drm_sched_entity *s_entity)
-{
-       struct v3d_job *job = to_v3d_job(sched_job);
-
-       /* XXX: Wait on a fence for switching the GMP if necessary,
-        * and then do so.
-        */
-
-       if (!xa_empty(&job->deps))
-               return xa_erase(&job->deps, job->last_dep++);
-
-       return NULL;
-}
-
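
Dropping v3d_job_dependency() loses no functionality: the scheduler core now
performs the same one-fence-at-a-time walk itself, over the xarray that
drm_sched_job_add_dependency() fills. Essentially (simplified from the
scheduler's dependency-tracking rework, not the literal core source):

/* What the core runs in place of a driver's .dependency hook. */
static struct dma_fence *
sketch_sched_job_dependency(struct drm_sched_job *job,
                            struct drm_sched_entity *entity)
{
        if (!xa_empty(&job->dependencies))
                return xa_erase(&job->dependencies, job->last_dependency++);

        /* drivers may still provide a hook for extra fences */
        if (job->sched->ops->dependency)
                return job->sched->ops->dependency(job, entity);

        return NULL;
}
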
 static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 {
        struct v3d_bin_job *job = to_bin_job(sched_job);
 }
 
 static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
-       .dependency = v3d_job_dependency,
        .run_job = v3d_bin_job_run,
        .timedout_job = v3d_bin_job_timedout,
        .free_job = v3d_sched_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_render_sched_ops = {
-       .dependency = v3d_job_dependency,
        .run_job = v3d_render_job_run,
        .timedout_job = v3d_render_job_timedout,
        .free_job = v3d_sched_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
-       .dependency = v3d_job_dependency,
        .run_job = v3d_tfu_job_run,
        .timedout_job = v3d_generic_job_timedout,
        .free_job = v3d_sched_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
-       .dependency = v3d_job_dependency,
        .run_job = v3d_csd_job_run,
        .timedout_job = v3d_csd_job_timedout,
        .free_job = v3d_sched_job_free
 };
 
 static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
-       .dependency = v3d_job_dependency,
        .run_job = v3d_cache_clean_job_run,
        .timedout_job = v3d_generic_job_timedout,
        .free_job = v3d_sched_job_free