job->sched->ops->free_job(job);
 }
 
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+                        struct drm_sched_entity *entity)
+{
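+       /* Dependencies are popped in allocation order: xa_alloc() always
+        * uses the lowest free index, so walking @last_dependency upwards
+        * returns them one by one until the array is empty, after which we
+        * fall back to the driver's ->dependency() callback, if any.
+        */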
+       if (!xa_empty(&job->dependencies))
+               return xa_erase(&job->dependencies, job->last_dependency++);
+
+       if (job->sched->ops->dependency)
+               return job->sched->ops->dependency(job, entity);
+
+       return NULL;
+}
+
 /**
  * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
  *
                struct drm_sched_fence *s_fence = job->s_fence;
 
                /* Wait for all dependencies to avoid data corruptions */
-               while ((f = job->sched->ops->dependency(job, entity)))
+               while ((f = drm_sched_job_dependency(job, entity)))
                        dma_fence_wait(f, false);
 
                drm_sched_fence_scheduled(s_fence);
  */
 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_job *sched_job;
 
        sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
                return NULL;
 
        while ((entity->dependency =
-                       sched->ops->dependency(sched_job, entity))) {
+                       drm_sched_job_dependency(sched_job, entity))) {
                trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
 
                if (drm_sched_entity_add_dependency_cb(entity))
 
 
        INIT_LIST_HEAD(&job->list);
 
+       xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
+
        return 0;
 }
 EXPORT_SYMBOL(drm_sched_job_init);
 }
 EXPORT_SYMBOL(drm_sched_job_arm);
 
+/**
+ * drm_sched_job_add_dependency - adds the fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Note that @fence is consumed in both the success and error cases.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+                                struct dma_fence *fence)
+{
+       struct dma_fence *entry;
+       unsigned long index;
+       u32 id = 0;
+       int ret;
+
+       if (!fence)
+               return 0;
+
+       /* Deduplicate if we already depend on a fence from the same context.
+        * This lets the size of the array of deps scale with the number of
+        * engines involved, rather than the number of BOs.
+        */
+       xa_for_each(&job->dependencies, index, entry) {
+               if (entry->context != fence->context)
+                       continue;
+
+               if (dma_fence_is_later(fence, entry)) {
+                       dma_fence_put(entry);
+                       xa_store(&job->dependencies, index, fence, GFP_KERNEL);
+               } else {
+                       dma_fence_put(fence);
+               }
+               return 0;
+       }
+
+       ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
+       if (ret != 0)
+               dma_fence_put(fence);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_dependency);
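For context, a quick sketch of how a driver submit path might feed an explicit sync point through this helper; the drm_syncobj lookup, file_priv, args and the job wrapper are illustrative stand-ins, not part of this patch:

	struct dma_fence *in_fence;
	int ret;

	/* Resolve a user-supplied syncobj handle into a dma_fence. */
	ret = drm_syncobj_find_fence(file_priv, args->in_sync, 0, 0,
				     &in_fence);
	if (ret)
		return ret;

	/* The fence reference is consumed even on failure, so no extra
	 * dma_fence_put() is needed in the error path.
	 */
	ret = drm_sched_job_add_dependency(&job->base, in_fence);
	if (ret)
		return ret;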
+
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ *   dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+                                           struct drm_gem_object *obj,
+                                           bool write)
+{
+       int ret;
+       struct dma_fence **fences;
+       unsigned int i, fence_count;
+
+       if (!write) {
+               struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
+
+               return drm_sched_job_add_dependency(job, fence);
+       }
+
+       ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
+       if (ret || !fence_count)
+               return ret;
+
+       for (i = 0; i < fence_count; i++) {
+               ret = drm_sched_job_add_dependency(job, fences[i]);
+               if (ret) {
+                       /* The failing fence was already consumed by
+                        * drm_sched_job_add_dependency(), so release only
+                        * the references we still own.
+                        */
+                       while (++i < fence_count)
+                               dma_fence_put(fences[i]);
+                       break;
+               }
+       }
+       kfree(fences);
+       return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
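And a sketch of the implicit side, following the kernel-doc above: a hypothetical CS ioctl locks all reservation objects via drm_gem_lock_reservations() first, then collects the implicit fences of every GEM object the job touches; bos, bo_count and write are driver-specific stand-ins:

	struct ww_acquire_ctx ctx;
	unsigned int i;
	int ret;

	ret = drm_gem_lock_reservations(bos, bo_count, &ctx);
	if (ret)
		return ret;

	for (i = 0; i < bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      bos[i], write);
		if (ret)
			goto out_unlock;
	}
	/* ... add the job's own finished fence to the reservations ... */
out_unlock:
	drm_gem_unlock_reservations(bos, bo_count, &ctx);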
+
 /**
  * drm_sched_job_cleanup - clean up scheduler job resources
  * @job: scheduler job to clean up
  */
 void drm_sched_job_cleanup(struct drm_sched_job *job)
 {
+       struct dma_fence *fence;
+       unsigned long index;
+
        if (kref_read(&job->s_fence->finished.refcount)) {
                /* drm_sched_job_arm() has been called */
                dma_fence_put(&job->s_fence->finished);
        }
 
        job->s_fence = NULL;
+
+       xa_for_each(&job->dependencies, index, fence) {
+               dma_fence_put(fence);
+       }
+       xa_destroy(&job->dependencies);
 }
 EXPORT_SYMBOL(drm_sched_job_cleanup);
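Taken together, a hedged sketch of the lifecycle these helpers enable; entity, owner and fence are placeholders, and any failure after drm_sched_job_init() must go through drm_sched_job_cleanup() so the dependency xarray is released:

	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret)
		return ret;

	ret = drm_sched_job_add_dependency(&job->base, fence);
	if (ret)
		goto err_cleanup;

	drm_sched_job_arm(&job->base);
	/* ... push the job to the entity ... */
	return 0;

err_cleanup:
	drm_sched_job_cleanup(&job->base);
	return ret;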
 
 
 #include <drm/spsc_queue.h>
 #include <linux/dma-fence.h>
 #include <linux/completion.h>
+#include <linux/xarray.h>
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
+struct drm_gem_object;
+
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
        struct dma_fence_cb             cb;
+       /**
+        * @dependencies:
+        *
+        * Contains the dependencies as struct dma_fence for this job, see
+        * drm_sched_job_add_dependency() and
+        * drm_sched_job_add_implicit_dependencies().
+        */
+       struct xarray                   dependencies;
+
+       /** @last_dependency: index of the next @dependencies entry to pop */
+       unsigned long                   last_dependency;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
  */
 struct drm_sched_backend_ops {
        /**
-         * @dependency: Called when the scheduler is considering scheduling
-         * this job next, to get another struct dma_fence for this job to
-        * block on.  Once it returns NULL, run_job() may be called.
+        * @dependency:
+        *
+        * Called when the scheduler is considering scheduling this job next, to
+        * get another struct dma_fence for this job to block on.  Once it
+        * returns NULL, run_job() may be called.
+        *
+        * If a driver exclusively uses drm_sched_job_add_dependency() and
+        * drm_sched_job_add_implicit_dependencies(), this callback can be
+        * omitted and left as NULL.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);
                       struct drm_sched_entity *entity,
                       void *owner);
 void drm_sched_job_arm(struct drm_sched_job *job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+                                struct dma_fence *fence);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+                                           struct drm_gem_object *obj,
+                                           bool write);
+
 void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                    struct drm_gpu_scheduler **sched_list,
                                    unsigned int num_sched_list);