struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
struct xe_exec_queue *eq, *next;
+ xe_exec_queue_last_fence_put_unlocked(q);
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
multi_gt_link)
			xe_exec_queue_put(eq);
}
+
+static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
+ struct xe_vm *vm)
+{
+ lockdep_assert_held_write(&vm->lock);
+}
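
A usage sketch (not part of the patch): the helpers below assume the caller
already holds the VM lock in write mode, as asserted above. Under that
assumption, a typical caller looks like this:

	down_write(&vm->lock);
	fence = xe_exec_queue_last_fence_get(q, vm);
	/* ... order new work against fence ... */
	dma_fence_put(fence);
	up_write(&vm->lock);
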
+
+/**
+ * xe_exec_queue_last_fence_put() - Drop ref to last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ */
+void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+
+ if (q->last_fence) {
+ dma_fence_put(q->last_fence);
+ q->last_fence = NULL;
+ }
+}
+
+/**
+ * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
+ * @q: The exec queue
+ *
+ * Only safe to be called from xe_exec_queue_destroy().
+ */
+void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
+{
+ if (q->last_fence) {
+ dma_fence_put(q->last_fence);
+ q->last_fence = NULL;
+ }
+}
+
+/**
+ * xe_exec_queue_last_fence_get() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get last fence, takes a ref. The caller must call dma_fence_put() on the
+ * returned fence when done with it.
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
+					       struct xe_vm *vm)
+{
+	struct dma_fence *fence;
+
+	xe_exec_queue_last_fence_lockdep_assert(q, vm);
+
+	if (q->last_fence &&
+	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+		xe_exec_queue_last_fence_put(q, vm);
+
+	fence = q->last_fence ? dma_fence_get(q->last_fence) :
+		dma_fence_get_stub();
+
+	return fence;
+}
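
A minimal consumer-side sketch of the semantics above, assuming the VM lock
is held: a signaled last fence is dropped and replaced by the stub, and the
caller always owns exactly one reference to release.

	struct dma_fence *dep = xe_exec_queue_last_fence_get(q, vm);

	dma_fence_wait(dep, false);	/* returns at once for the stub */
	dma_fence_put(dep);
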
+
+/**
+ * xe_exec_queue_last_fence_set() - Set last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ * @fence: The fence
+ *
+ * Set the last fence for the engine. Takes a reference to @fence; when
+ * closing the engine, xe_exec_queue_last_fence_put() should be called.
+ */
+void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
+ struct dma_fence *fence)
+{
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+
+ xe_exec_queue_last_fence_put(q, vm);
+ q->last_fence = dma_fence_get(fence);
+}
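
The matching producer-side sketch: once a bind or exec job is queued, its
fence becomes the queue's last fence. queue_job() here is a hypothetical
stand-in for the driver's real submission path, which this patch does not
touch; it is assumed to return a referenced fence.

	struct dma_fence *fence = queue_job(q);		/* hypothetical helper */

	xe_exec_queue_last_fence_set(q, vm, fence);	/* takes its own ref */
	dma_fence_put(fence);				/* drop the submit ref */
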
if (xe_vm_in_compute_mode(vm))
flush_work(&vm->preempt.rebind_work);
+ down_write(&vm->lock);
+ for_each_tile(tile, xe, id) {
+ if (vm->q[id])
+ xe_exec_queue_last_fence_put(vm->q[id], vm);
+ }
+ up_write(&vm->lock);
+
for_each_tile(tile, xe, id) {
if (vm->q[id]) {
xe_exec_queue_kill(vm->q[id]);
tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
}
+static struct xe_exec_queue *
+to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ return q ? q : vm->q[0];
+}
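
This fallback keeps ordering intact for binds submitted without an explicit
exec queue: waits are routed to the VM's default bind queue. A sketch:

	/* q == NULL resolves to vm->q[0], the default bind queue */
	struct xe_exec_queue *wq = to_wait_exec_queue(vm, NULL);
	struct dma_fence *dep = xe_exec_queue_last_fence_get(wq, vm);

	/* ... wait on or schedule against dep ... */
	dma_fence_put(dep);
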
+
static struct dma_fence *
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op)
{
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
struct xe_tile *tile;
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
- struct xe_vm *vm = xe_vma_vm(vma);
int cur_fence = 0, i;
int number_tiles = hweight8(vma->tile_present);
int err;
cf ? &cf->base : fence);
}
- return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
+ return cf ? &cf->base : !fence ?
+ xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
err_fences:
if (fences) {
bool last_op)
{
struct dma_fence *fence;
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
xe_vm_assert_held(vm);
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
- fence = dma_fence_get_stub();
+ fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
if (last_op) {
for (i = 0; i < num_syncs; i++)
xe_sync_entry_signal(&syncs[i], NULL, fence);
}
}
+ if (last_op)
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
if (last_op && xe_vm_sync_mode(vm, q))
dma_fence_wait(fence, true);
dma_fence_put(fence);
u32 num_syncs, bool first_op, bool last_op)
{
struct dma_fence *fence;
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
return PTR_ERR(fence);
xe_vma_destroy(vma, fence);
+ if (last_op)
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
if (last_op && xe_vm_sync_mode(vm, q))
dma_fence_wait(fence, true);
dma_fence_put(fence);
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op)
{
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
int err;
xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
/* Nothing to do, signal fences now */
if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL,
- dma_fence_get_stub());
+		for (i = 0; i < num_syncs; i++) {
+			struct dma_fence *fence =
+				xe_exec_queue_last_fence_get(wait_exec_queue, vm);
+
+			xe_sync_entry_signal(&syncs[i], NULL, fence);
+			dma_fence_put(fence);
+		}
}
return 0;
unwind_ops:
vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
free_syncs:
- for (i = 0; err == -ENODATA && i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, dma_fence_get_stub());
+	for (i = 0; err == -ENODATA && i < num_syncs; i++) {
+		struct dma_fence *fence =
+			xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);
+
+		xe_sync_entry_signal(&syncs[i], NULL, fence);
+		dma_fence_put(fence);
+	}
while (num_syncs--)
xe_sync_entry_cleanup(&syncs[num_syncs]);