list_del_init(&vma->notifier.rebind_link);
if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
- list_move_tail(&vma->rebind_link, &vm->rebind_list);
+ list_move_tail(&vma->combined_links.rebind,
+ &vm->rebind_list);
}
spin_unlock(&vm->notifier.list_lock);
if (err)
goto out_unlock;
- list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
+ list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
if (xe_vma_has_no_bo(vma) ||
vma->gpuva.flags & XE_VMA_DESTROYED)
continue;
list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&vma->userptr.invalidate_link);
- list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
+ if (list_empty(&vma->combined_links.userptr))
+ list_move_tail(&vma->combined_links.userptr,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
/* Pin and move to temporary list */
- list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
+ list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
+ combined_links.userptr) {
err = xe_vma_userptr_pin_pages(vma);
if (err < 0)
goto out_err;
- list_move_tail(&vma->userptr_link, &tmp_evict);
+ list_move_tail(&vma->combined_links.userptr, &tmp_evict);
}
/* Take lock and move to rebind_list for rebinding. */
if (err)
goto out_err;
- list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
- list_del_init(&vma->userptr_link);
- list_move_tail(&vma->rebind_link, &vm->rebind_list);
- }
+ list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
+ list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
dma_resv_unlock(xe_vm_resv(vm));
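A side note on the hunk above: both union members name the same embedded node, so the single list_move_tail() through the rebind alias also unlinks the VMA from tmp_evict, which it joined through the userptr alias; the separate list_del_init() of the old userptr_link is therefore no longer needed. A minimal standalone sketch of that aliasing, with fake_vma as an invented stand-in for struct xe_vma:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

/* fake_vma is an invented stand-in for struct xe_vma. */
struct fake_vma {
	union {
		struct list_head userptr;
		struct list_head rebind;
		struct list_head destroy;
	} combined_links;
};

int main(void)
{
	struct fake_vma vma;

	/* All three aliases name the same embedded list node. */
	assert((void *)&vma.combined_links.userptr ==
	       (void *)&vma.combined_links.rebind);
	assert((void *)&vma.combined_links.rebind ==
	       (void *)&vma.combined_links.destroy);
	return 0;
}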
return NULL;
xe_vm_assert_held(vm);
- list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
+ list_for_each_entry_safe(vma, next, &vm->rebind_list,
+ combined_links.rebind) {
XE_WARN_ON(!vma->tile_present);
- list_del_init(&vma->rebind_link);
+ list_del_init(&vma->combined_links.rebind);
dma_fence_put(fence);
if (rebind_worker)
trace_xe_vma_rebind_worker(vma);
return vma;
}
- INIT_LIST_HEAD(&vma->rebind_link);
- INIT_LIST_HEAD(&vma->unbind_link);
- INIT_LIST_HEAD(&vma->userptr_link);
+ INIT_LIST_HEAD(&vma->combined_links.rebind);
INIT_LIST_HEAD(&vma->userptr.invalidate_link);
INIT_LIST_HEAD(&vma->notifier.rebind_link);
INIT_LIST_HEAD(&vma->extobj.link);
struct xe_vm *vm = xe_vma_vm(vma);
lockdep_assert_held_write(&vm->lock);
- XE_BUG_ON(!list_empty(&vma->unbind_link));
+ XE_BUG_ON(!list_empty(&vma->combined_links.destroy));
if (xe_vma_is_userptr(vma)) {
XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
spin_lock(&vm->userptr.invalidated_lock);
list_del_init(&vma->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- list_del(&vma->userptr_link);
} else if (!xe_vma_is_null(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
}
xe_vm_assert_held(vm);
- if (!list_empty(&vma->rebind_link))
- list_del(&vma->rebind_link);
-
if (fence) {
int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
vma_destroy_cb);
/* easy case, remove from VMA? */
if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
+ list_del_init(&vma->combined_links.rebind);
xe_vma_destroy(vma, NULL);
continue;
}
- list_add_tail(&vma->unbind_link, &contested);
+ list_move_tail(&vma->combined_links.destroy, &contested);
}
/*
* Since we hold a refcount to the bo, we can remove and free
* the members safely without locking.
*/
- list_for_each_entry_safe(vma, next_vma, &contested, unbind_link) {
- list_del_init(&vma->unbind_link);
+ list_for_each_entry_safe(vma, next_vma, &contested,
+ combined_links.destroy) {
+ list_del_init(&vma->combined_links.destroy);
xe_vma_destroy_unlocked(vma);
}
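The teardown above keeps a familiar shape: contested VMAs are gathered onto a private list while the VM locks are held and are only destroyed afterwards, now through the destroy alias of the combined link. A rough userspace model of that detach-under-lock, destroy-after-unlock pattern; item, pending and drain_and_destroy are invented names, and a pthread mutex stands in for the VM locks:

#include <pthread.h>
#include <stdlib.h>

struct item { struct item *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *pending;		/* protected by "lock" */

static void queue_item(struct item *it)
{
	pthread_mutex_lock(&lock);
	it->next = pending;
	pending = it;
	pthread_mutex_unlock(&lock);
}

static void drain_and_destroy(void)
{
	struct item *contested, *it;

	/* Detach everything onto a private list while the lock is held... */
	pthread_mutex_lock(&lock);
	contested = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	/* ...then destroy the members without the lock, as in the hunk above. */
	while ((it = contested) != NULL) {
		contested = it->next;
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 2; i++) {
		struct item *it = calloc(1, sizeof(*it));

		if (it)
			queue_item(it);
	}
	drain_and_destroy();
	return 0;
}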
*/
u64 tile_present;
- /** @userptr_link: link into VM repin list if userptr */
- struct list_head userptr_link;
-
- /**
- * @rebind_link: link into VM if this VMA needs rebinding, and
- * if it's a bo (not userptr) needs validation after a possible
- * eviction. Protected by the vm's resv lock.
- */
- struct list_head rebind_link;
-
- /**
- * @unbind_link: link or list head if an unbind of multiple VMAs, in
- * single unbind op, is being done.
- */
- struct list_head unbind_link;
+ /**
+ * @combined_links: links into lists which are mutually exclusive. A
+ * VMA can only be on one of these lists at a time.
+ */
+ union {
+ /**
+ * @userptr: link into the VM repin list if this is a userptr VMA.
+ * Protected by vm->lock in write mode.
+ */
+ struct list_head userptr;
+ /**
+ * @rebind: link into VM if this VMA needs rebinding, and
+ * if it's a bo (not userptr) needs validation after a possible
+ * eviction. Protected by the vm's resv lock and typically
+ * vm->lock is also held in write mode. The only place where
+ * vm->lock isn't held is the BO eviction path, which has
+ * mutually exclusive execution with userptr.
+ */
+ struct list_head rebind;
+ /**
+ * @destroy: link into the contested list when the VM is being
+ * closed. Protected by vm->lock in write mode and the vm's resv lock.
+ */
+ struct list_head destroy;
+ } combined_links;
/** @destroy_cb: callback to destroy VMA when unbind job is done */
struct dma_fence_cb destroy_cb;
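Taken together, the union trades three embedded list_heads for one: a VMA is only ever queued for repin, rebind or teardown, never more than one at a time, so a single node can serve all three roles. Below is a self-contained sketch of that lifecycle; fake_vma, repin_list, rebind_list and contested are invented names, and the list helpers are reimplemented only so the example builds in userspace (they mirror the semantics of the kernel's <linux/list.h>).

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

/* Like the kernel helper: unlink from the current list, append to @h. */
static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del_init(n);
	list_add_tail(n, h);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

/* One embedded node, three mutually exclusive uses over the VMA's life. */
struct fake_vma {
	union {
		struct list_head userptr;   /* queued for userptr repin */
		struct list_head rebind;    /* queued for rebind */
		struct list_head destroy;   /* queued for teardown */
	} combined_links;
};

int main(void)
{
	struct list_head repin_list, rebind_list, contested;
	struct fake_vma vma;

	INIT_LIST_HEAD(&repin_list);
	INIT_LIST_HEAD(&rebind_list);
	INIT_LIST_HEAD(&contested);
	/* Mirrors the single INIT_LIST_HEAD() left in the creation path. */
	INIT_LIST_HEAD(&vma.combined_links.rebind);

	/* Invalidated userptr: queue for repin through the userptr alias. */
	list_move_tail(&vma.combined_links.userptr, &repin_list);
	assert(!list_empty(&repin_list));

	/*
	 * Repinned: one move through the rebind alias both unlinks the VMA
	 * from the repin list and queues it on the rebind list.
	 */
	list_move_tail(&vma.combined_links.rebind, &rebind_list);
	assert(list_empty(&repin_list) && !list_empty(&rebind_list));

	/* VM teardown: the same storage now serves as the destroy link. */
	list_move_tail(&vma.combined_links.destroy, &contested);
	assert(list_empty(&rebind_list) && !list_empty(&contested));

	printf("one embedded link, three list memberships over time\n");
	return 0;
}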