From: Brian Welty
Date: Fri, 5 Jan 2024 19:04:40 +0000 (-0800)
Subject: drm/xe: Fix modifying exec_queue priority in xe_migrate_init
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=19c02225242498eea9267d444ee1276016368d49;p=linux.git

drm/xe: Fix modifying exec_queue priority in xe_migrate_init

After exec_queue has been created, we cannot simply modify q->priority.
This needs to be done by the backend via q->ops. However in this case,
it would be more efficient to simply pass a flag when creating the
exec_queue and set the desired priority upfront during queue creation.

To that end: new flag EXEC_QUEUE_FLAG_HIGH_PRIORITY is introduced.
The priority field is moved to be with other scheduling properties and
is now exec_queue.sched_props.priority. This is no longer set to initial
value by the backend, but is now set within __xe_exec_queue_create().

Fixes: b4eecedc75c1 ("drm/xe: Fix potential deadlock handling page faults")
Signed-off-by: Brian Welty
Signed-off-by: Matthew Brost
Reviewed-by: Matthew Brost
(cherry picked from commit a8004af338f6b3319476ecbed63ea49bf393fc1f)
Signed-off-by: Thomas Hellström
---

diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 44fe8097b7cda..bcfc4127c7c59 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -67,6 +67,11 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
 	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
 	q->sched_props.preempt_timeout_us =
 				hwe->eclass->sched_props.preempt_timeout_us;
+	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
+	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
+		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
+	else
+		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 
 	if (xe_exec_queue_is_parallel(q)) {
 		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 3d7e704ec3d9f..8d4b7feb8c306 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -52,8 +52,6 @@ struct xe_exec_queue {
 	struct xe_vm *vm;
 	/** @class: class of this exec queue */
 	enum xe_engine_class class;
-	/** @priority: priority of this exec queue */
-	enum xe_exec_queue_priority priority;
 	/**
 	 * @logical_mask: logical mask of where job submitted to exec queue can run
 	 */
@@ -84,6 +82,8 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_VM			BIT(4)
 /* child of VM queue for multi-tile VM jobs */
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(5)
+/* kernel exec_queue only, set priority to highest level */
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(6)
 
 	/**
 	 * @flags: flags for this exec queue, should statically setup aside from ban
@@ -142,6 +142,8 @@ struct xe_exec_queue {
 		u32 timeslice_us;
 		/** @preempt_timeout_us: preemption timeout in micro-seconds */
 		u32 preempt_timeout_us;
+		/** @priority: priority of this exec queue */
+		enum xe_exec_queue_priority priority;
 	} sched_props;
 
 	/** @compute: compute exec queue state */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 5de3ac47c4623..54ffcfcdd41f9 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -421,7 +421,7 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	struct exec_queue_policy policy;
 	struct xe_device *xe = guc_to_xe(guc);
-	enum xe_exec_queue_priority prio = q->priority;
+	enum xe_exec_queue_priority prio = q->sched_props.priority;
 	u32 timeslice_us = q->sched_props.timeslice_us;
 	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
 
@@ -1231,7 +1231,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	err = xe_sched_entity_init(&ge->entity, sched);
 	if (err)
 		goto err_sched;
-	q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 
 	if (xe_exec_queue_is_lr(q))
 		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
@@ -1301,14 +1300,14 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
 {
 	struct xe_sched_msg *msg;
 
-	if (q->priority == priority || exec_queue_killed_or_banned(q))
+	if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
 		return 0;
 
 	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
 
-	q->priority = priority;
+	q->sched_props.priority = priority;
 	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
 
 	return 0;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index adf1dab5eba25..02fca8f9adc28 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -344,7 +344,8 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 
 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
 					    EXEC_QUEUE_FLAG_KERNEL |
-					    EXEC_QUEUE_FLAG_PERMANENT);
+					    EXEC_QUEUE_FLAG_PERMANENT |
+					    EXEC_QUEUE_FLAG_HIGH_PRIORITY);
 	} else {
 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
 						  XE_ENGINE_CLASS_COPY,
@@ -355,8 +356,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 		xe_vm_close_and_put(vm);
 		return ERR_CAST(m->q);
 	}
-	if (xe->info.has_usm)
-		m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
 
 	mutex_init(&m->job_mutex);
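
For reference, the net effect of the xe_migrate_init() change boils down to
the following condensed before/after sketch, drawn from the hunks above
(error handling and the non-USM branch are elided; this is illustration,
not part of the patch):

	/*
	 * Before: the queue was created first and the priority field was
	 * then written directly.  Per the commit message, post-creation
	 * priority changes must go through the backend via q->ops, so the
	 * direct write is not a valid way to raise the priority.
	 */
	m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
				    EXEC_QUEUE_FLAG_KERNEL |
				    EXEC_QUEUE_FLAG_PERMANENT);
	if (xe->info.has_usm)
		m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;

	/*
	 * After: the desired priority is requested at creation time.
	 * __xe_exec_queue_create() maps KERNEL + HIGH_PRIORITY to
	 * XE_EXEC_QUEUE_PRIORITY_KERNEL (NORMAL otherwise) before the
	 * backend sees the queue, and init_policies() now reads
	 * q->sched_props.priority.
	 */
	m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
				    EXEC_QUEUE_FLAG_KERNEL |
				    EXEC_QUEUE_FLAG_PERMANENT |
				    EXEC_QUEUE_FLAG_HIGH_PRIORITY);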