drm/xe: Invalidate userptr VMA on page pin fault
Author:     Matthew Brost <matthew.brost@intel.com>
AuthorDate: Tue, 12 Mar 2024 18:39:07 +0000 (11:39 -0700)
Commit:     Lucas De Marchi <lucas.demarchi@intel.com>
CommitDate: Thu, 14 Mar 2024 19:29:33 +0000 (14:29 -0500)
Rather than returning an error to the user or banning the VM when a
userptr VMA page pin fails with -EFAULT, invalidate the VMA's mappings.
This supports the UMD use case of freeing userptr memory while bindings
to it still exist.

Now that non-faulting VMs can invalidate VMAs, drop the usm prefix for
the tile_invalidated member.

v2:
 - Fix build error (CI)
v3:
 - Don't invalidate the VMA in fault mode; kill the VM instead (Thomas)
 - Update commit message with the tile_invalidated name change (Thomas)
 - Wait on VM bookkeep slots while holding the VM resv lock (Thomas)
v4:
 - Move list_del_init(&userptr.repin_link) after error check (Thomas)
 - Assert not in fault mode (Matthew)

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240312183907.933835-1-matthew.brost@intel.com
(cherry picked from commit 521db22a1d70dbc596a07544a738416025b1b63c)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_trace.h
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73c535193a984b385f65986c9b1c0d3b79f1f5b4..241c294270d9167f25d1898f8f590c7aabb06ca0 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
        return BIT(tile->id) & vma->tile_present &&
-               !(BIT(tile->id) & vma->usm.tile_invalidated);
+               !(BIT(tile->id) & vma->tile_invalidated);
 }
 
 static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -226,7 +226,7 @@ retry_userptr:
 
        if (xe_vma_is_userptr(vma))
                ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
-       vma->usm.tile_invalidated &= ~BIT(tile->id);
+       vma->tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
        drm_exec_fini(&exec);
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 3b97633d81d8508fdeae9035dca60a067473e04e..d82c138f1ecefa988481c9e2a2bc96b0ee29d420 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -464,7 +464,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
             TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
 );
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e3bde897f6e8aa90a591aab9255e35aed7567b15..3dd8bb64bb00ba3b0dd2ce50f7b9fc08c3774b66 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
        int err = 0;
        LIST_HEAD(tmp_evict);
 
+       xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
        lockdep_assert_held_write(&vm->lock);
 
        /* Collect invalidated userptrs */
@@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
        list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
                                 userptr.repin_link) {
                err = xe_vma_userptr_pin_pages(uvma);
-               if (err < 0)
-                       return err;
+               if (err == -EFAULT) {
+                       list_del_init(&uvma->userptr.repin_link);
 
-               list_del_init(&uvma->userptr.repin_link);
-               list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+                       /* Wait for pending binds */
+                       xe_vm_lock(vm, false);
+                       dma_resv_wait_timeout(xe_vm_resv(vm),
+                                             DMA_RESV_USAGE_BOOKKEEP,
+                                             false, MAX_SCHEDULE_TIMEOUT);
+
+                       err = xe_vm_invalidate_vma(&uvma->vma);
+                       xe_vm_unlock(vm);
+                       if (err)
+                               return err;
+               } else {
+                       if (err < 0)
+                               return err;
+
+                       list_del_init(&uvma->userptr.repin_link);
+                       list_move_tail(&uvma->vma.combined_links.rebind,
+                                      &vm->rebind_list);
+               }
        }
 
        return 0;
@@ -1987,7 +2004,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                        return err;
        }
 
-       if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+       if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
                return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
                                  true, first_op, last_op);
        } else {
@@ -3185,9 +3202,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
        u8 id;
        int ret;
 
-       xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
        xe_assert(xe, !xe_vma_is_null(vma));
-       trace_xe_vma_usm_invalidate(vma);
+       trace_xe_vma_invalidate(vma);
 
        /* Check that we don't race with page-table updates */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3225,7 +3241,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
                }
        }
 
-       vma->usm.tile_invalidated = vma->tile_mask;
+       vma->tile_invalidated = vma->tile_mask;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 292f8cadb40fde7c4123663cbf9ba991d8795855..713996f7dc59f6cceedd3b19228f85c766dd9807 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -83,11 +83,8 @@ struct xe_vma {
                struct work_struct destroy_work;
        };
 
-       /** @usm: unified shared memory state */
-       struct {
-               /** @tile_invalidated: VMA has been invalidated */
-               u8 tile_invalidated;
-       } usm;
+       /** @tile_invalidated: VMA has been invalidated */
+       u8 tile_invalidated;
 
        /** @tile_mask: Tile mask of where to create binding for this VMA */
        u8 tile_mask;