Ensure that there are no drm clients when changing CCS mode.
Allow exec_queue creation only with enabled CCS engines.
v2: Rebase
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
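The hunks below cover only the client-count guard; the exec_queue-creation restriction mentioned above is not part of this excerpt. A minimal sketch of that check, assuming the create ioctl already has the resolved hw engine and a helper such as xe_hw_engine_is_reserved() that reports engines parked by the current ccs_mode (the helper name is an assumption here, not necessarily what the patch uses):

	/*
	 * Sketch only, not the actual hunk: refuse exec_queue creation on a
	 * CCS engine that the current ccs_mode has parked.
	 * xe_hw_engine_is_reserved() is an assumed helper name.
	 */
	if (xe_hw_engine_is_reserved(hwe))
		return -EINVAL;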
mutex_init(&xef->exec_queue.lock);
xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
+ spin_lock(&xe->clients.lock);
+ xe->clients.count++;
+ spin_unlock(&xe->clients.lock);
+
file->driver_priv = xef;
return 0;
}
xa_destroy(&xef->vm.xa);
mutex_destroy(&xef->vm.lock);
+ spin_lock(&xe->clients.lock);
+ xe->clients.count--;
+ spin_unlock(&xe->clients.lock);
+
xe_drm_client_put(xef->client);
kfree(xef);
}
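The open/close paths above bump and drop the count inline. A hypothetical pair of helpers, not part of this patch, makes the intended pairing explicit:

/*
 * Hypothetical helpers (illustration only): every xe_file_open() that
 * increments the count must be matched by an xe_file_close() that
 * decrements it, always under xe->clients.lock so ccs_mode_store()
 * observes a stable value.
 */
static void xe_clients_get(struct xe_device *xe)
{
	spin_lock(&xe->clients.lock);
	xe->clients.count++;
	spin_unlock(&xe->clients.lock);
}

static void xe_clients_put(struct xe_device *xe)
{
	spin_lock(&xe->clients.lock);
	xe->clients.count--;
	spin_unlock(&xe->clients.lock);
}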
xe->info.force_execlist = xe_modparam.force_execlist;
spin_lock_init(&xe->irq.lock);
+ spin_lock_init(&xe->clients.lock);
init_waitqueue_head(&xe->ufence_wq);
enum xe_sriov_mode __mode;
} sriov;
+ /** @clients: drm clients info */
+ struct {
+ /** @lock: Protects drm clients info */
+ spinlock_t lock;
+
+ /** @count: number of drm clients */
+ u64 count;
+ } clients;
+
/** @usm: unified memory state */
struct {
/** @asid: convert an ASID to VM */
const char *buff, size_t count)
{
struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
u32 num_engines, num_slices;
int ret;
return -EINVAL;
}
+ /* CCS mode can only be updated when there are no drm clients */
+ spin_lock(&xe->clients.lock);
+ if (xe->clients.count) {
+ spin_unlock(&xe->clients.lock);
+ return -EBUSY;
+ }
+
if (gt->ccs_mode != num_engines) {
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_reset_async(gt);
}
+ spin_unlock(&xe->clients.lock);
+
return count;
}
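From userspace the attribute behaves like any other sysfs file: a write fails with EBUSY while a drm client is open, so the mode has to be changed before clients start. A small standalone sketch; the exact sysfs path is an assumption and the card/tile/gt numbering varies per system:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; adjust card/tile/gt indices for the target system. */
	const char *path = "/sys/class/drm/card0/device/tile0/gt0/ccs_mode";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Request two CCS engines; fails with EBUSY if any drm client is open. */
	if (write(fd, "2", 1) < 0) {
		if (errno == EBUSY)
			fprintf(stderr, "close all drm clients before changing ccs_mode\n");
		else
			perror("write");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}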