From: Thomas Hellström
Date: Fri, 8 Sep 2023 09:17:16 +0000 (+0200)
Subject: drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=1f72718215ff2763653a82d9cbc41bfed3186caa;p=linux.git

drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec

The VM_BIND functionality and vma destruction were locking potentially
multiple dma_resv objects using the ttm_eu_reserve_buffers() function.
Rework those to use the drm_exec helper, taking care that any calls to
xe_bo_validate() end up inside an unsealed locking transaction.

v4:
- Remove an unbalanced xe_bo_put() (igt and Matthew Brost)
v5:
- Rebase conflict

Signed-off-by: Thomas Hellström
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20230908091716.36984-7-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi
---
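Not part of the patch itself: for readers unfamiliar with drm_exec, the conversions
below all follow the same retry-loop shape sketched here. The sketch is illustrative
only; my_op()/my_locked_op() are placeholder names, and it assumes the drm_exec API
as used in this series (two-argument drm_exec_init()).

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/* Placeholder for whatever work needs the object's dma_resv held. */
static int my_op(struct drm_gem_object *obj)
{
	return 0;
}

static int my_locked_op(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock obj->resv and reserve one fence slot. */
		err = drm_exec_prepare_obj(&exec, obj, 1);
		/* On ww-mutex contention, drop all locks and restart the loop. */
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	if (!err)
		err = my_op(obj);	/* all dma_resv locks are held here */

	drm_exec_fini(&exec);
	return err;
}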
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 52c5235677c51..4def60249381b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1129,29 +1129,20 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
 
 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
 {
-	struct ttm_validate_buffer tv[2];
-	struct ww_acquire_ctx ww;
-	struct xe_bo *bo = xe_vma_bo(vma);
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
+	struct drm_exec exec;
 	int err;
 
-	memset(tv, 0, sizeof(tv));
-	tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
-	list_add(&tv[0].head, &objs);
-
-	if (bo) {
-		tv[1].bo = &xe_bo_get(bo)->ttm;
-		list_add(&tv[1].head, &objs);
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		err = xe_vm_prepare_vma(&exec, vma, 0);
+		drm_exec_retry_on_contention(&exec);
+		if (XE_WARN_ON(err))
+			break;
 	}
-	err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
-	XE_WARN_ON(err);
 
 	xe_vma_destroy(vma, NULL);
 
-	ttm_eu_backoff_reservation(&ww, &objs);
-	if (bo)
-		xe_bo_put(bo);
+	drm_exec_fini(&exec);
 }
 
 struct xe_vma *
@@ -2142,21 +2133,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 
 #define VM_BIND_OP(op)	(op & 0xffff)
 
-struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
-{
-	int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
-		XE_VM_FLAG_TILE_ID(vm->flags) : 0;
-
-	/* Safe to use index 0 as all BO in the VM share a single dma-resv lock */
-	return &vm->pt_root[idx]->bo->ttm;
-}
-
-static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
-{
-	tv->num_shared = 1;
-	tv->bo = xe_vm_ttm_bo(vm);
-}
-
 static void vm_set_async_error(struct xe_vm *vm, int err)
 {
 	lockdep_assert_held(&vm->lock);
@@ -2668,42 +2644,16 @@ free_fence:
 	return err;
 }
 
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
-			       struct xe_vma_op *op)
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+		      struct xe_vma *vma, struct xe_vma_op *op)
 {
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
-	struct ttm_validate_buffer tv_bo, tv_vm;
-	struct ww_acquire_ctx ww;
-	struct xe_bo *vbo;
 	int err;
 
 	lockdep_assert_held_write(&vm->lock);
-	xe_vm_tv_populate(vm, &tv_vm);
-	list_add_tail(&tv_vm.head, &objs);
-	vbo = xe_vma_bo(vma);
-	if (vbo) {
-		/*
-		 * An unbind can drop the last reference to the BO and
-		 * the BO is needed for ttm_eu_backoff_reservation so
-		 * take a reference here.
-		 */
-		xe_bo_get(vbo);
-
-		if (!vbo->vm) {
-			tv_bo.bo = &vbo->ttm;
-			tv_bo.num_shared = 1;
-			list_add(&tv_bo.head, &objs);
-		}
-	}
-
-again:
-	err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
-	if (err) {
-		xe_bo_put(vbo);
+	err = xe_vm_prepare_vma(exec, vma, 1);
+	if (err)
 		return err;
-	}
 
 	xe_vm_assert_held(vm);
 	xe_bo_assert_held(xe_vma_bo(vma));
 
@@ -2782,17 +2732,36 @@ again:
 		XE_WARN_ON("NOT POSSIBLE");
 	}
 
-	ttm_eu_backoff_reservation(&ww, &objs);
+	if (err)
+		trace_xe_vma_fail(vma);
+
+	return err;
+}
+
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+			       struct xe_vma_op *op)
+{
+	struct drm_exec exec;
+	int err;
+
+retry_userptr:
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		err = op_execute(&exec, vm, vma, op);
+		drm_exec_retry_on_contention(&exec);
+		if (err)
+			break;
+	}
+	drm_exec_fini(&exec);
+
 	if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
 		lockdep_assert_held_write(&vm->lock);
 		err = xe_vma_userptr_pin_pages(vma);
 		if (!err)
-			goto again;
-	}
-	xe_bo_put(vbo);
+			goto retry_userptr;
 
-	if (err)
 		trace_xe_vma_fail(vma);
+	}
 
 	return err;
 }
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index ad9ff2b39a306..cc9dfd8cb7701 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -180,8 +180,6 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence);
 
 extern struct ttm_device_funcs xe_ttm_funcs;
 
-struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);
-
 static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
 {
 	XE_WARN_ON(!xe_vm_in_compute_mode(vm));