The only submission in the driver that currently doesn't use a vm is the
WA setup. We still pass a vm structure (the migration one), but we don't
actually use it at submission time and we instead have a hack to use
GGTT for this particular engine.
Instead of special-casing the WA engine, we can skip providing a VM and
use that as selector for whether to use GGTT or PPGTT. As part of this
change, we can drop the special engine flag for the WA engine and switch
the WA submission to use the standard job functions instead of dedicated
ones.
v2: rebased on s/engine/exec_queue
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230822173334.1664332-4-daniele.ceraolospurio@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
return xe_sched_job_create(q, addr);
}
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
- struct xe_bb *bb, u64 batch_base_ofs)
-{
- u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
-
- XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
-
- return __xe_bb_create_job(q, bb, &addr);
-}
-
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
struct xe_bb *bb,
u64 batch_base_ofs,
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
struct xe_bb *bb, u64 batch_ofs,
u32 second_idx);
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
- struct xe_bb *bb, u64 batch_ofs);
void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
#endif
* can perform GuC CT actions when needed. Caller is expected to
* have already grabbed the rpm ref outside any sensitive locks.
*/
- if (q->flags & EXEC_QUEUE_FLAG_VM)
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
return q;
xe_lrc_finish(q->lrc + i);
if (q->vm)
xe_vm_put(q->vm);
- if (q->flags & EXEC_QUEUE_FLAG_VM)
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
xe_device_mem_access_put(gt_to_xe(q->gt));
kfree(q);
#define EXEC_QUEUE_FLAG_VM BIT(5)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(6)
-/* queue used for WA setup */
-#define EXEC_QUEUE_FLAG_WA BIT(7)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
struct xe_sched_job *job;
struct xe_bb *bb;
struct dma_fence *fence;
- u64 batch_ofs;
long timeout;
bb = xe_bb_new(gt, 4, false);
if (IS_ERR(bb))
return PTR_ERR(bb);
- batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
- job = xe_bb_create_wa_job(q, bb, batch_ofs);
+ job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
return PTR_ERR(job);
struct xe_sched_job *job;
struct xe_bb *bb;
struct dma_fence *fence;
- u64 batch_ofs;
long timeout;
int count = 0;
}
}
- batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
- job = xe_bb_create_wa_job(q, bb, batch_ofs);
+ job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
return PTR_ERR(job);
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- struct xe_tile *tile = gt_to_tile(gt);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
int err = 0;
for_each_hw_engine(hwe, gt, id) {
struct xe_exec_queue *q, *nop_q;
- struct xe_vm *vm;
void *default_lrc;
if (gt->default_lrc[hwe->class])
if (!default_lrc)
return -ENOMEM;
- vm = xe_migrate_get_vm(tile->migrate);
- q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
- hwe, EXEC_QUEUE_FLAG_WA);
+ q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
+ hwe, EXEC_QUEUE_FLAG_KERNEL);
if (IS_ERR(q)) {
err = PTR_ERR(q);
xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
hwe->name, q);
- goto put_vm;
+ return err;
}
/* Prime golden LRC with known good state */
goto put_exec_queue;
}
- nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
- 1, hwe, EXEC_QUEUE_FLAG_WA);
+ nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
+ 1, hwe, EXEC_QUEUE_FLAG_KERNEL);
if (IS_ERR(nop_q)) {
err = PTR_ERR(nop_q);
xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
xe_exec_queue_put(nop_q);
put_exec_queue:
xe_exec_queue_put(q);
-put_vm:
- xe_vm_put(vm);
if (err)
break;
}
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
- return !(job->q->flags & EXEC_QUEUE_FLAG_WA) ? BIT(8) : 0;
+ return job->q->vm ? BIT(8) : 0;
}
/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
bool xe_sched_job_is_migration(struct xe_exec_queue *q)
{
- return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
- !(q->flags & EXEC_QUEUE_FLAG_WA);
+ return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
}
static void job_free(struct xe_sched_job *job)
XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
/* Migration and kernel engines have their own locking */
- if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
- EXEC_QUEUE_FLAG_WA))) {
+ if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
lockdep_assert_held(&q->vm->lock);
if (!xe_vm_no_dma_fences(q->vm))
xe_vm_assert_held(q->vm);