vm_list_node) {
                struct amdgpu_bo *pd = peer_vm->root.base.bo;
 
-               ret = amdgpu_sync_resv(NULL,
-                                       sync, pd->tbo.base.resv,
-                                       AMDGPU_FENCE_OWNER_KFD, false);
+               ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+                                      AMDGPU_SYNC_NE_OWNER,
+                                      AMDGPU_FENCE_OWNER_KFD);
                if (ret)
                        return ret;
        }
 
        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
+               enum amdgpu_sync_mode sync_mode;
 
-               r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, &fpriv->vm,
-                                    amdgpu_bo_explicit_sync(bo));
-
+               sync_mode = amdgpu_bo_explicit_sync(bo) ?
+                       AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+               r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+                                    &fpriv->vm);
                if (r)
                        return r;
        }
 
        int r;
 
        amdgpu_sync_create(&sync);
-       amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
+       amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+                        AMDGPU_SYNC_NE_OWNER, owner);
        r = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
 
 
  *
  * @sync: sync object to add fences from reservation object to
  * @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
  *
  * Sync to the fence
  */
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-                    struct amdgpu_sync *sync,
-                    struct dma_resv *resv,
-                    void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                    struct dma_resv *resv, enum amdgpu_sync_mode mode,
+                    void *owner)
 {
        struct dma_resv_list *flist;
        struct dma_fence *f;
-       void *fence_owner;
        unsigned i;
        int r = 0;
 
                return r;
 
        for (i = 0; i < flist->shared_count; ++i) {
+               void *fence_owner;
+
                f = rcu_dereference_protected(flist->shared[i],
                                              dma_resv_held(resv));
                /* We only want to trigger KFD eviction fences on
                    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                        continue;
 
-               if (amdgpu_sync_same_dev(adev, f)) {
-                       /* VM updates only sync with moves but not with user
-                        * command submissions or KFD evictions fences
-                        */
-                       if (owner == AMDGPU_FENCE_OWNER_VM &&
-                           fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+               /* VM updates only sync with moves but not with user
+                * command submissions or KFD evictions fences
+                */
+               if (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+                   owner == AMDGPU_FENCE_OWNER_VM)
+                       continue;
+
+               /* Ignore fences depending on the sync mode */
+               switch (mode) {
+               case AMDGPU_SYNC_ALWAYS:
+                       break;
+
+               case AMDGPU_SYNC_NE_OWNER:
+                       if (amdgpu_sync_same_dev(adev, f) &&
+                           fence_owner == owner)
                                continue;
+                       break;
 
-                       /* Ignore fence from the same owner and explicit one as
-                        * long as it isn't undefined.
-                        */
-                       if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-                           (fence_owner == owner || explicit_sync))
+               case AMDGPU_SYNC_EQ_OWNER:
+                       if (amdgpu_sync_same_dev(adev, f) &&
+                           fence_owner != owner)
+                               continue;
+                       break;
+
+               case AMDGPU_SYNC_EXPLICIT:
+                       if (owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                                continue;
+                       break;
                }
 
                r = amdgpu_sync_fence(sync, f, false);
 
 struct amdgpu_device;
 struct amdgpu_ring;
 
+/*
+ * Fence-filtering policy for amdgpu_sync_resv(): selects which fences of a
+ * reservation object are added to the sync object, based on the fence owner
+ * passed by the caller (see the mode switch in amdgpu_sync_resv()).
+ */
+enum amdgpu_sync_mode {
+	/* Sync to every fence, regardless of owner */
+	AMDGPU_SYNC_ALWAYS,
+	/* Skip fences from the same device with the same owner;
+	 * sync to everything else
+	 */
+	AMDGPU_SYNC_NE_OWNER,
+	/* Skip fences from the same device with a *different* owner;
+	 * sync only to fences from the given owner
+	 */
+	AMDGPU_SYNC_EQ_OWNER,
+	/* Only sync to explicitly requested fences (owner undefined);
+	 * skip implicit ones
+	 */
+	AMDGPU_SYNC_EXPLICIT
+};
+
 /*
  * Container for fences used to sync command submissions.
  */
 int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
                      bool explicit);
 int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-                    struct amdgpu_sync *sync,
-                    struct dma_resv *resv,
-                    void *owner,
-                    bool explicit_sync);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                    struct dma_resv *resv, enum amdgpu_sync_mode mode,
+                    void *owner);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                     struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
 
        }
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED,
-                                    false);
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
 
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED, false);
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
 
                        goto err_free;
        } else {
                r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED, false);
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r)
                        goto err_free;
 
 
                return 0;
 
        return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
-                               owner, false);
+                               AMDGPU_SYNC_NE_OWNER, owner);
 }
 
 /**