drm/xe: Internally change the compute_mode and no_dma_fence mode naming
author Thomas Hellström <thomas.hellstrom@linux.intel.com>
Mon, 27 Nov 2023 12:33:49 +0000 (13:33 +0100)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:44:58 +0000 (11:44 -0500)
The name "compute_mode" can be confusing since compute uses either this
mode or fault_mode to achieve the long-running semantics, and compute_mode
can, moving forward, enable fault_mode under the hood to work around
hardware limitations.

Also, the name no_dma_fence_mode really refers to what we elsewhere call
long-running mode and, contrary to what its name suggests, the mode
allows dma-fences as in-fences.

So in an attempt to be more consistent, rename
no_dma_fence_mode -> lr_mode
compute_mode      -> preempt_fence_mode

And adjust flags so that

preempt_fence_mode sets XE_VM_FLAG_LR_MODE
fault_mode sets XE_VM_FLAG_LR_MODE | XE_VM_FLAG_FAULT_MODE
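
For reference, the mode helpers in xe_vm.h then end up roughly as below
(a minimal sketch of the new inline helpers, matching the xe_vm.h hunk
further down):

	static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
	{
		return vm->flags & XE_VM_FLAG_FAULT_MODE;
	}

	static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
	{
		return vm->flags & XE_VM_FLAG_LR_MODE;
	}

	static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
	{
		/* LR VMs that don't use page faults rely on preempt fences. */
		return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
	}

So xe_vm_in_preempt_fence_mode() is true only for long-running VMs that
do not use fault_mode.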

v2:
- Fix a typo in the commit message (Oak Zeng)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231127123349.23698-1-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_sync.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h
drivers/gpu/drm/xe/xe_vm_types.h

drivers/gpu/drm/xe/xe_exec.c
index 85a8a793f527ba477254b853e9148e2abdea1810..5ec37df33afe43ca45c198ce8d0c52f52b5d965e 100644
@@ -100,7 +100,7 @@ static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
        LIST_HEAD(dups);
        int err = 0;
 
-       if (xe_vm_no_dma_fences(vm))
+       if (xe_vm_in_lr_mode(vm))
                return 0;
 
        /*
@@ -182,7 +182,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        for (i = 0; i < args->num_syncs; i++) {
                err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
                                          &syncs_user[i], true,
-                                         xe_vm_no_dma_fences(vm));
+                                         xe_vm_in_lr_mode(vm));
                if (err)
                        goto err_syncs;
        }
@@ -197,7 +197,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        }
 
 retry:
-       if (!xe_vm_no_dma_fences(vm) && xe_vm_userptr_check_repin(vm)) {
+       if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
                err = down_write_killable(&vm->lock);
                write_locked = true;
        } else {
@@ -279,7 +279,7 @@ retry:
        }
 
        /* Wait behind munmap style rebinds */
-       if (!xe_vm_no_dma_fences(vm)) {
+       if (!xe_vm_in_lr_mode(vm)) {
                err = drm_sched_job_add_resv_dependencies(&job->drm,
                                                          xe_vm_resv(vm),
                                                          DMA_RESV_USAGE_KERNEL);
@@ -292,7 +292,7 @@ retry:
        if (err)
                goto err_put_job;
 
-       if (!xe_vm_no_dma_fences(vm)) {
+       if (!xe_vm_in_lr_mode(vm)) {
                err = down_read_interruptible(&vm->userptr.notifier_lock);
                if (err)
                        goto err_put_job;
@@ -307,7 +307,7 @@ retry:
         * the job and let the DRM scheduler / backend clean up the job.
         */
        xe_sched_job_arm(job);
-       if (!xe_vm_no_dma_fences(vm)) {
+       if (!xe_vm_in_lr_mode(vm)) {
                /* Block userptr invalidations / BO eviction */
                dma_resv_add_fence(xe_vm_resv(vm),
                                   &job->drm.s_fence->finished,
@@ -330,14 +330,14 @@ retry:
        xe_sched_job_push(job);
        xe_vm_reactivate_rebind(vm);
 
-       if (!err && !xe_vm_no_dma_fences(vm)) {
+       if (!err && !xe_vm_in_lr_mode(vm)) {
                spin_lock(&xe->ttm.lru_lock);
                ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
                spin_unlock(&xe->ttm.lru_lock);
        }
 
 err_repin:
-       if (!xe_vm_no_dma_fences(vm))
+       if (!xe_vm_in_lr_mode(vm))
                up_read(&vm->userptr.notifier_lock);
 err_put_job:
        if (err)
drivers/gpu/drm/xe/xe_exec_queue.c
index fbb4d3cca9f68188f3bda9e7378bc8c18401c1da..98fc13c89a4da0d931fe7820851333e7fc2f10fb 100644
@@ -327,7 +327,7 @@ static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, xe_vm_in_compute_mode(q->vm)))
+       if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
                return -EINVAL;
 
        if (value)
@@ -705,14 +705,14 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 
                q = xe_exec_queue_create(xe, vm, logical_mask,
                                         args->width, hwe,
-                                        xe_vm_no_dma_fences(vm) ? 0 :
+                                        xe_vm_in_lr_mode(vm) ? 0 :
                                         EXEC_QUEUE_FLAG_PERSISTENT);
                up_read(&vm->lock);
                xe_vm_put(vm);
                if (IS_ERR(q))
                        return PTR_ERR(q);
 
-               if (xe_vm_in_compute_mode(vm)) {
+               if (xe_vm_in_preempt_fence_mode(vm)) {
                        q->compute.context = dma_fence_context_alloc(1);
                        spin_lock_init(&q->compute.lock);
 
@@ -785,7 +785,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
  */
 bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
 {
-       return q->vm && xe_vm_no_dma_fences(q->vm) &&
+       return q->vm && xe_vm_in_lr_mode(q->vm) &&
                !(q->flags & EXEC_QUEUE_FLAG_VM);
 }
 
drivers/gpu/drm/xe/xe_guc_submit.c
index b13c925c56ee91662c580dfbf54a3b9d8a9352b9..32c234d753fdd9b82b435d52c1c28006b198dcea 100644
@@ -1211,7 +1211,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
        ge->q = q;
        init_waitqueue_head(&ge->suspend_wait);
 
-       timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
+       timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
                  q->hwe->eclass->sched_props.job_timeout_ms;
        err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
                            get_submit_wq(guc),
drivers/gpu/drm/xe/xe_pt.c
index 1fd461fb426e107d1043d7340d59392a52222755..c6c9b723db5acab98483d7b62c4f5da4df52192f 100644
@@ -1292,8 +1292,8 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
         * non-faulting LR, in particular on user-space batch buffer chaining,
         * it needs to be done here.
         */
-       if ((rebind && !xe_vm_no_dma_fences(vm) && !vm->batch_invalidate_tlb) ||
-           (!rebind && vm->scratch_bo[tile->id] && xe_vm_in_compute_mode(vm))) {
+       if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
+           (!rebind && vm->scratch_bo[tile->id] && xe_vm_in_preempt_fence_mode(vm))) {
                ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
                if (!ifence)
                        return ERR_PTR(-ENOMEM);
@@ -1355,7 +1355,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
                        xe_bo_put_commit(&deferred);
                }
                if (!rebind && last_munmap_rebind &&
-                   xe_vm_in_compute_mode(vm))
+                   xe_vm_in_preempt_fence_mode(vm))
                        xe_vm_queue_rebind_worker(vm);
        } else {
                kfree(rfence);
drivers/gpu/drm/xe/xe_sched_job.c
index 84c700aed8ac7568fe0716626493c29dba200bbe..a9c7ae815bec55eb64cf56750a3d5b873f8f9220 100644
@@ -92,7 +92,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
        /* Migration and kernel engines have their own locking */
        if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
                lockdep_assert_held(&q->vm->lock);
-               if (!xe_vm_no_dma_fences(q->vm))
+               if (!xe_vm_in_lr_mode(q->vm))
                        xe_vm_assert_held(q->vm);
        }
 
drivers/gpu/drm/xe/xe_sync.c
index eafe53c2f55dfc389e7c54a096cb39aa1e49aafb..ea96ba4b41da4ddcd1bfd706848ccd40853ca598 100644
@@ -100,7 +100,7 @@ static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                        struct xe_sync_entry *sync,
                        struct drm_xe_sync __user *sync_user,
-                       bool exec, bool no_dma_fences)
+                       bool exec, bool in_lr_mode)
 {
        struct drm_xe_sync sync_in;
        int err;
@@ -118,7 +118,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
        signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
        switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
        case DRM_XE_SYNC_FLAG_SYNCOBJ:
-               if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
+               if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
                        return -EOPNOTSUPP;
 
                if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
@@ -136,7 +136,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                break;
 
        case DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ:
-               if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
+               if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
                        return -EOPNOTSUPP;
 
                if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
index 7c0ae43731803bb97217fd3e6d4721d11a8b2bf7..c33ae4db4e02abe8fb85891583f46dd0c027aa3b 100644 (file)
@@ -340,7 +340,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
        int err;
        bool wait;
 
-       xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+       xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
 
        down_write(&vm->lock);
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
@@ -394,7 +394,7 @@ out_unlock:
  */
 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 {
-       if (!xe_vm_in_compute_mode(vm))
+       if (!xe_vm_in_preempt_fence_mode(vm))
                return;
 
        down_write(&vm->lock);
@@ -596,7 +596,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
        long wait;
        int __maybe_unused tries = 0;
 
-       xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+       xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
        trace_xe_vm_rebind_worker_enter(vm);
 
        down_write(&vm->lock);
@@ -840,7 +840,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
        struct xe_vma *vma, *next;
 
        lockdep_assert_held(&vm->lock);
-       if (xe_vm_no_dma_fences(vm) && !rebind_worker)
+       if (xe_vm_in_lr_mode(vm) && !rebind_worker)
                return NULL;
 
        xe_vm_assert_held(vm);
@@ -1436,9 +1436,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                vm->batch_invalidate_tlb = true;
        }
 
-       if (flags & XE_VM_FLAG_COMPUTE_MODE) {
+       if (flags & XE_VM_FLAG_LR_MODE) {
                INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
-               vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
+               vm->flags |= XE_VM_FLAG_LR_MODE;
                vm->batch_invalidate_tlb = false;
        }
 
@@ -1526,7 +1526,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
        xe_assert(xe, !vm->preempt.num_exec_queues);
 
        xe_vm_close(vm);
-       if (xe_vm_in_compute_mode(vm))
+       if (xe_vm_in_preempt_fence_mode(vm))
                flush_work(&vm->preempt.rebind_work);
 
        down_write(&vm->lock);
@@ -1975,11 +1975,11 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
                flags |= XE_VM_FLAG_SCRATCH_PAGE;
        if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
-               flags |= XE_VM_FLAG_COMPUTE_MODE;
+               flags |= XE_VM_FLAG_LR_MODE;
        if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
                flags |= XE_VM_FLAG_ASYNC_DEFAULT;
        if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
-               flags |= XE_VM_FLAG_FAULT_MODE;
+               flags |= XE_VM_FLAG_LR_MODE | XE_VM_FLAG_FAULT_MODE;
 
        vm = xe_vm_create(xe, flags);
        if (IS_ERR(vm))
@@ -3066,7 +3066,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
                err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
                                          &syncs_user[num_syncs], false,
-                                         xe_vm_no_dma_fences(vm));
+                                         xe_vm_in_lr_mode(vm));
                if (err)
                        goto free_syncs;
        }
drivers/gpu/drm/xe/xe_vm.h
index 45b70ba86553511629d34e1348cd87cdd5aa59ff..12bb5d79487f234394e8ab565a315100c47273ca 100644
@@ -149,19 +149,19 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
 
 void xe_vm_close_and_put(struct xe_vm *vm);
 
-static inline bool xe_vm_in_compute_mode(struct xe_vm *vm)
+static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
 {
-       return vm->flags & XE_VM_FLAG_COMPUTE_MODE;
+       return vm->flags & XE_VM_FLAG_FAULT_MODE;
 }
 
-static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
+static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
 {
-       return vm->flags & XE_VM_FLAG_FAULT_MODE;
+       return vm->flags & XE_VM_FLAG_LR_MODE;
 }
 
-static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
+static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
 {
-       return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
+       return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
 }
 
 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
@@ -181,7 +181,7 @@ extern struct ttm_device_funcs xe_ttm_funcs;
 
 static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
 {
-       xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+       xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
        queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
 }
 
@@ -196,7 +196,7 @@ static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
  */
 static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
 {
-       if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
+       if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
                vm->preempt.rebind_deactivated = false;
                xe_vm_queue_rebind_worker(vm);
        }
drivers/gpu/drm/xe/xe_vm_types.h
index 97d779d8a7d382a5177ebf724f59451f24bb481a..fc2645e07578df46dacfbfe4c43566eb0ba55bb8 100644
@@ -154,7 +154,7 @@ struct xe_vm {
         * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
         */
 #define XE_VM_FLAG_64K                 BIT(0)
-#define XE_VM_FLAG_COMPUTE_MODE                BIT(1)
+#define XE_VM_FLAG_LR_MODE             BIT(1)
 #define XE_VM_FLAG_ASYNC_DEFAULT       BIT(2)
 #define XE_VM_FLAG_MIGRATION           BIT(3)
 #define XE_VM_FLAG_SCRATCH_PAGE                BIT(4)