static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
- struct ttm_validate_buffer tv[2];
- struct ww_acquire_ctx ww;
- struct xe_bo *bo = xe_vma_bo(vma);
- LIST_HEAD(objs);
- LIST_HEAD(dups);
+ struct drm_exec exec;
int err;
- memset(tv, 0, sizeof(tv));
- tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
- list_add(&tv[0].head, &objs);
-
- if (bo) {
- tv[1].bo = &xe_bo_get(bo)->ttm;
- list_add(&tv[1].head, &objs);
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = xe_vm_prepare_vma(&exec, vma, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (XE_WARN_ON(err))
+ break;
}
- err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
- XE_WARN_ON(err);
xe_vma_destroy(vma, NULL);
- ttm_eu_backoff_reservation(&ww, &objs);
- if (bo)
- xe_bo_put(bo);
+ drm_exec_fini(&exec);
}
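For readers who have not met drm_exec yet: it replaces the ttm_eu_* execbuf helpers with a transactional ww-mutex locking loop. Everything inside drm_exec_until_all_locked() may be rolled back and re-run when a lock is contended; drm_exec_retry_on_contention() is the point that detects the rollback and restarts the loop body. A minimal sketch of the pattern the new code follows (not part of the patch; the single 'obj' is a placeholder, and the two-argument drm_exec_init() matches the API at the time of this series):

#include <drm/drm_exec.h>

/* Minimal drm_exec loop: lock one GEM object, then do work under the
 * reservation. Real callers such as xe_vm_prepare_vma() lock several
 * objects before leaving the loop.
 */
static int lock_one_obj(struct drm_gem_object *obj, unsigned int num_fences)
{
	struct drm_exec exec;
	int err = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock obj and reserve num_fences dma-resv fence slots. */
		err = drm_exec_prepare_obj(&exec, obj, num_fences);
		/* Contended ww_mutex: drop all locks, re-run the loop body. */
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}

	/* On success, every requested lock is held until drm_exec_fini(). */
	drm_exec_fini(&exec);
	return err;
}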
#define VM_BIND_OP(op) (op & 0xffff)
-struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
-{
- int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
- XE_VM_FLAG_TILE_ID(vm->flags) : 0;
-
- /* Safe to use index 0 as all BO in the VM share a single dma-resv lock */
- return &vm->pt_root[idx]->bo->ttm;
-}
-
-static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
-{
- tv->num_shared = 1;
- tv->bo = xe_vm_ttm_bo(vm);
-}
-
static void vm_set_async_error(struct xe_vm *vm, int err)
{
lockdep_assert_held(&vm->lock);
vm->async_ops.error = err;
}
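The two helpers removed above existed only to populate ttm_validate_buffer entries for ttm_eu_reserve_buffers(); their role is absorbed by xe_vm_prepare_vma(), which the new code calls from inside the drm_exec loop. Roughly, such a helper locks the VM's shared dma-resv (backed by the page-table root BO, as the removed xe_vm_ttm_bo() shows) plus the VMA's BO when that BO is external and carries its own reservation, mirroring the old '!vbo->vm' special case. An illustrative sketch, not the patch's exact body (vm_resv_obj stands in for however the driver resolves the VM's GEM object):

/* Illustrative only: lock the VM-wide reservation and, for an external
 * BO with its own dma-resv (bo->vm == NULL), that BO's reservation too.
 */
static int prepare_vma_sketch(struct drm_exec *exec,
			      struct drm_gem_object *vm_resv_obj,
			      struct xe_vma *vma, unsigned int num_fences)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	int err;

	err = drm_exec_prepare_obj(exec, vm_resv_obj, num_fences);
	if (!err && bo && !bo->vm)
		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_fences);

	return err;
}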
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+ struct xe_vma *vma, struct xe_vma_op *op)
{
- LIST_HEAD(objs);
- LIST_HEAD(dups);
- struct ttm_validate_buffer tv_bo, tv_vm;
- struct ww_acquire_ctx ww;
- struct xe_bo *vbo;
int err;
lockdep_assert_held_write(&vm->lock);
- xe_vm_tv_populate(vm, &tv_vm);
- list_add_tail(&tv_vm.head, &objs);
- vbo = xe_vma_bo(vma);
- if (vbo) {
- /*
- * An unbind can drop the last reference to the BO and
- * the BO is needed for ttm_eu_backoff_reservation so
- * take a reference here.
- */
- xe_bo_get(vbo);
-
- if (!vbo->vm) {
- tv_bo.bo = &vbo->ttm;
- tv_bo.num_shared = 1;
- list_add(&tv_bo.head, &objs);
- }
- }
-
-again:
- err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
- if (err) {
- xe_bo_put(vbo);
+ err = xe_vm_prepare_vma(exec, vma, 1);
+ if (err)
return err;
- }
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
	...
	default:
		XE_WARN_ON("NOT POSSIBLE");
}
- ttm_eu_backoff_reservation(&ww, &objs);
+ if (err)
+ trace_xe_vma_fail(vma);
+
+ return err;
+}
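Note the last argument to xe_vm_prepare_vma(): the destroy path passes 0, while op_execute() passes 1. It is forwarded as the num_fences of drm_exec_prepare_obj(), which both locks the object and pre-reserves that many fence slots in its dma-resv, so that publishing the bind/unbind fence later cannot fail with -ENOMEM while the locks are held; the destroy path publishes no fence and needs no slot. Roughly what drm_exec_prepare_obj() does (a sketch; the real helper also unlocks the object again on reservation failure):

static int prepare_obj_sketch(struct drm_exec *exec,
			      struct drm_gem_object *obj,
			      unsigned int num_fences)
{
	/* Lock (and take a reference on) the object ... */
	int err = drm_exec_lock_obj(exec, obj);

	/* ... then pre-reserve fence slots in its dma-resv. */
	if (!err && num_fences)
		err = dma_resv_reserve_fences(obj->resv, num_fences);

	return err;
}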
+
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
+{
+ struct drm_exec exec;
+ int err;
+
+retry_userptr:
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ err = op_execute(&exec, vm, vma, op);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ drm_exec_fini(&exec);
+
if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
lockdep_assert_held_write(&vm->lock);
err = xe_vma_userptr_pin_pages(vma);
if (!err)
- goto again;
- }
- xe_bo_put(vbo);
-
- if (err)
+ goto retry_userptr;
+
trace_xe_vma_fail(vma);
+ }
return err;
}
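Two details are worth calling out in the rewritten retry path. First, drm_exec takes a reference on every object it locks and holds it until drm_exec_fini(), which is why the manual xe_bo_get()/xe_bo_put() dance around the old reservation disappears from both call sites. Second, re-pinning userptr pages can fault and sleep, so it must happen outside the transaction: the code finishes the drm_exec loop, repins, and only then jumps back to retry_userptr to start a fresh transaction. The overall shape, as a sketch (do_locked_work() and refresh_backing() are hypothetical stand-ins for op_execute() and xe_vma_userptr_pin_pages()):

static int do_locked_work(struct drm_gem_object *obj)
{
	return 0;	/* stand-in for op_execute() */
}

static int refresh_backing(struct drm_gem_object *obj)
{
	return 0;	/* stand-in for xe_vma_userptr_pin_pages() */
}

static int work_with_repin(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int err;

retry:
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = drm_exec_prepare_obj(&exec, obj, 1);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
		err = do_locked_work(obj);
	}
	drm_exec_fini(&exec);

	/* Backing store went stale: refresh it unlocked, then relock. */
	if (err == -EAGAIN && !refresh_backing(obj))
		goto retry;

	return err;
}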