LIST_HEAD(dups);
int err = 0;
- if (xe_vm_no_dma_fences(vm))
+ if (xe_vm_in_lr_mode(vm))
return 0;
for (i = 0; i < args->num_syncs; i++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
&syncs_user[i], true,
- xe_vm_no_dma_fences(vm));
+ xe_vm_in_lr_mode(vm));
if (err)
goto err_syncs;
}
}
retry:
- if (!xe_vm_no_dma_fences(vm) && xe_vm_userptr_check_repin(vm)) {
+ if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
err = down_write_killable(&vm->lock);
write_locked = true;
} else {
}
/* Wait behind munmap style rebinds */
- if (!xe_vm_no_dma_fences(vm)) {
+ if (!xe_vm_in_lr_mode(vm)) {
err = drm_sched_job_add_resv_dependencies(&job->drm,
xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL);
if (err)
goto err_put_job;
- if (!xe_vm_no_dma_fences(vm)) {
+ if (!xe_vm_in_lr_mode(vm)) {
err = down_read_interruptible(&vm->userptr.notifier_lock);
if (err)
goto err_put_job;
* the job and let the DRM scheduler / backend clean up the job.
*/
xe_sched_job_arm(job);
- if (!xe_vm_no_dma_fences(vm)) {
+ if (!xe_vm_in_lr_mode(vm)) {
/* Block userptr invalidations / BO eviction */
dma_resv_add_fence(xe_vm_resv(vm),
&job->drm.s_fence->finished,
xe_sched_job_push(job);
xe_vm_reactivate_rebind(vm);
- if (!err && !xe_vm_no_dma_fences(vm)) {
+ if (!err && !xe_vm_in_lr_mode(vm)) {
spin_lock(&xe->ttm.lru_lock);
ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
spin_unlock(&xe->ttm.lru_lock);
}
err_repin:
- if (!xe_vm_no_dma_fences(vm))
+ if (!xe_vm_in_lr_mode(vm))
up_read(&vm->userptr.notifier_lock);
err_put_job:
if (err)
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, xe_vm_in_compute_mode(q->vm)))
+ if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
return -EINVAL;
if (value)
q = xe_exec_queue_create(xe, vm, logical_mask,
args->width, hwe,
- xe_vm_no_dma_fences(vm) ? 0 :
+ xe_vm_in_lr_mode(vm) ? 0 :
EXEC_QUEUE_FLAG_PERSISTENT);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
return PTR_ERR(q);
- if (xe_vm_in_compute_mode(vm)) {
+ if (xe_vm_in_preempt_fence_mode(vm)) {
q->compute.context = dma_fence_context_alloc(1);
spin_lock_init(&q->compute.lock);
*/
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
- return q->vm && xe_vm_no_dma_fences(q->vm) &&
+ return q->vm && xe_vm_in_lr_mode(q->vm) &&
!(q->flags & EXEC_QUEUE_FLAG_VM);
}
ge->q = q;
init_waitqueue_head(&ge->suspend_wait);
- timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
+ timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
q->hwe->eclass->sched_props.job_timeout_ms;
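/*
 * Note: queues on long-running (LR) VMs are not bound by the per-class job
 * timeout, hence MAX_SCHEDULE_TIMEOUT is used above.
 */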
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
get_submit_wq(guc),
* non-faulting LR, in particular on user-space batch buffer chaining,
* it needs to be done here.
*/
- if ((rebind && !xe_vm_no_dma_fences(vm) && !vm->batch_invalidate_tlb) ||
- (!rebind && vm->scratch_bo[tile->id] && xe_vm_in_compute_mode(vm))) {
+ if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
+ (!rebind && vm->scratch_bo[tile->id] && xe_vm_in_preempt_fence_mode(vm))) {
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
xe_bo_put_commit(&deferred);
}
if (!rebind && last_munmap_rebind &&
- xe_vm_in_compute_mode(vm))
+ xe_vm_in_preempt_fence_mode(vm))
xe_vm_queue_rebind_worker(vm);
} else {
kfree(rfence);
/* Migration and kernel engines have their own locking */
if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
lockdep_assert_held(&q->vm->lock);
- if (!xe_vm_no_dma_fences(q->vm))
+ if (!xe_vm_in_lr_mode(q->vm))
xe_vm_assert_held(q->vm);
}
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
- bool exec, bool no_dma_fences)
+ bool exec, bool in_lr_mode)
{
struct drm_xe_sync sync_in;
int err;
signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
case DRM_XE_SYNC_FLAG_SYNCOBJ:
- if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
+ if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
return -EOPNOTSUPP;
if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
break;
case DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ:
- if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
+ if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
return -EOPNOTSUPP;
if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
int err;
bool wait;
- xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+ xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
down_write(&vm->lock);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
*/
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
- if (!xe_vm_in_compute_mode(vm))
+ if (!xe_vm_in_preempt_fence_mode(vm))
return;
down_write(&vm->lock);
long wait;
int __maybe_unused tries = 0;
- xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+ xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
trace_xe_vm_rebind_worker_enter(vm);
down_write(&vm->lock);
struct xe_vma *vma, *next;
lockdep_assert_held(&vm->lock);
- if (xe_vm_no_dma_fences(vm) && !rebind_worker)
+ if (xe_vm_in_lr_mode(vm) && !rebind_worker)
return NULL;
xe_vm_assert_held(vm);
vm->batch_invalidate_tlb = true;
}
- if (flags & XE_VM_FLAG_COMPUTE_MODE) {
+ if (flags & XE_VM_FLAG_LR_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
- vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
+ vm->flags |= XE_VM_FLAG_LR_MODE;
vm->batch_invalidate_tlb = false;
}
xe_assert(xe, !vm->preempt.num_exec_queues);
xe_vm_close(vm);
- if (xe_vm_in_compute_mode(vm))
+ if (xe_vm_in_preempt_fence_mode(vm))
flush_work(&vm->preempt.rebind_work);
down_write(&vm->lock);
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
- flags |= XE_VM_FLAG_COMPUTE_MODE;
+ flags |= XE_VM_FLAG_LR_MODE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
flags |= XE_VM_FLAG_ASYNC_DEFAULT;
if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
- flags |= XE_VM_FLAG_FAULT_MODE;
+ flags |= XE_VM_FLAG_LR_MODE | XE_VM_FLAG_FAULT_MODE;
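/* Fault mode VMs are long-running too, hence LR_MODE is set alongside FAULT_MODE above. */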
vm = xe_vm_create(xe, flags);
if (IS_ERR(vm))
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
&syncs_user[num_syncs], false,
- xe_vm_no_dma_fences(vm));
+ xe_vm_in_lr_mode(vm));
if (err)
goto free_syncs;
}
void xe_vm_close_and_put(struct xe_vm *vm);
-static inline bool xe_vm_in_compute_mode(struct xe_vm *vm)
+static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
- return vm->flags & XE_VM_FLAG_COMPUTE_MODE;
+ return vm->flags & XE_VM_FLAG_FAULT_MODE;
}
-static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
+static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
- return vm->flags & XE_VM_FLAG_FAULT_MODE;
+ return vm->flags & XE_VM_FLAG_LR_MODE;
}
-static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
+static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
- return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
+ return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}
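/*
 * Rough mapping from the old helpers to the new ones: callers of
 * xe_vm_no_dma_fences() now use xe_vm_in_lr_mode(), which is set for both
 * preempt-fence (the former "compute mode") and fault mode VMs, while
 * callers of xe_vm_in_compute_mode() now use xe_vm_in_preempt_fence_mode(),
 * i.e. LR mode without fault mode:
 *
 *   VM flags              in_lr_mode  in_fault_mode  in_preempt_fence_mode
 *   LR_MODE               true        false          true
 *   LR_MODE | FAULT_MODE  true        true           false
 *   neither               false       false          false
 */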
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
- xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+ xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}
*/
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
- if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
+ if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
vm->preempt.rebind_deactivated = false;
xe_vm_queue_rebind_worker(vm);
}
* from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
*/
#define XE_VM_FLAG_64K BIT(0)
-#define XE_VM_FLAG_COMPUTE_MODE BIT(1)
+#define XE_VM_FLAG_LR_MODE BIT(1)
#define XE_VM_FLAG_ASYNC_DEFAULT BIT(2)
#define XE_VM_FLAG_MIGRATION BIT(3)
#define XE_VM_FLAG_SCRATCH_PAGE BIT(4)