return -EFAULT;
if (XE_IOCTL_DBG(xe, sync_in.flags &
- ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
+ ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_FLAG_SIGNAL)) ||
XE_IOCTL_DBG(xe, sync_in.pad) ||
XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
return -EINVAL;
- signal = sync_in.flags & DRM_XE_SYNC_SIGNAL;
+ signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
- case DRM_XE_SYNC_SYNCOBJ:
+ case DRM_XE_SYNC_FLAG_SYNCOBJ:
if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
return -EOPNOTSUPP;
}
break;
- case DRM_XE_SYNC_TIMELINE_SYNCOBJ:
+ case DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ:
if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
return -EOPNOTSUPP;
}
break;
- case DRM_XE_SYNC_DMA_BUF:
+ case DRM_XE_SYNC_FLAG_DMA_BUF:
if (XE_IOCTL_DBG(xe, "TODO"))
return -EINVAL;
break;
- case DRM_XE_SYNC_USER_FENCE:
+ case DRM_XE_SYNC_FLAG_USER_FENCE:
if (XE_IOCTL_DBG(xe, !signal))
return -EOPNOTSUPP;
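For reference, a minimal userspace sketch of filling a sync entry with the renamed constants. This is editorial illustration, not part of the patch: the addr and timeline_value fields are assumed from their use in xe_sync_entry_signal() below, and fence_gpu_addr is a hypothetical GPU address the kernel writes on completion.

	struct drm_xe_sync sync = {
		/* low bits pick the sync type, bit 4 requests signaling;
		 * user fences must be signaled, per the check above */
		.flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
		.addr = fence_gpu_addr,
		.timeline_value = 1,	/* value stored at addr when signaled */
	};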
void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
struct dma_fence *fence)
{
- if (!(sync->flags & DRM_XE_SYNC_SIGNAL))
+ if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
return;
if (sync->chain_fence) {
dma_fence_put(fence);
}
} else if ((sync->flags & SYNC_FLAGS_TYPE_MASK) ==
- DRM_XE_SYNC_USER_FENCE) {
+ DRM_XE_SYNC_FLAG_USER_FENCE) {
job->user_fence.used = true;
job->user_fence.addr = sync->addr;
job->user_fence.value = sync->timeline_value;
return 0;
}
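Since the type field and the signal bit share one flags word, the non-overlap can be made explicit with a compile-time check. A sketch, assuming the types occupy the low bits covered by the driver-internal SYNC_FLAGS_TYPE_MASK (not shown in this patch):

	/* DRM_XE_SYNC_FLAG_USER_FENCE is the largest type value (0x3) and
	 * DRM_XE_SYNC_FLAG_SIGNAL is bit 4 (0x10), so they cannot collide. */
	_Static_assert((DRM_XE_SYNC_FLAG_USER_FENCE & DRM_XE_SYNC_FLAG_SIGNAL) == 0,
		       "sync type field must not overlap the signal bit");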
-#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
- DRM_XE_VM_CREATE_COMPUTE_MODE | \
- DRM_XE_VM_CREATE_ASYNC_DEFAULT | \
- DRM_XE_VM_CREATE_FAULT_MODE)
+#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
+ DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | \
+ DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
return -EINVAL;
if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
- args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
+ args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
- if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
!xe->info.supports_usm))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
- args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE &&
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
xe_device_in_non_fault_mode(xe)))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
+ if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
xe_device_in_fault_mode(xe)))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
- if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
- if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
flags |= XE_VM_FLAG_COMPUTE_MODE;
- if (args->flags & DRM_XE_VM_CREATE_ASYNC_DEFAULT)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
flags |= XE_VM_FLAG_ASYNC_DEFAULT;
- if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
flags |= XE_VM_FLAG_FAULT_MODE;
vm = xe_vm_create(xe, flags);
* Scratch page
* ------------
*
- * If the VM is created with the flag, DRM_XE_VM_CREATE_SCRATCH_PAGE, set the
- * entire page table structure defaults pointing to blank page allocated by the
- * VM. Invalid memory access rather than fault just read / write to this page.
+ * If the VM is created with the DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE flag, the
+ * entire page table structure defaults to pointing at a blank page allocated
+ * by the VM. Invalid memory accesses then read from / write to this page
+ * rather than faulting.
*
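Tying the renamed flags together, a hedged userspace sketch of creating a VM with a scratch page (drmIoctl() is the usual libdrm wrapper; the vm_id output field is assumed from the xe uAPI of this era; error handling trimmed):

	struct drm_xe_vm_create create = {
		/* scratch page is mutually exclusive with
		 * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, per xe_vm_create_ioctl() */
		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE |
			 DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT,
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
		return -errno;
	/* create.vm_id now names the new VM */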
return 0;
}
-#define VALID_FLAGS (DRM_XE_UFENCE_WAIT_SOFT_OP | \
- DRM_XE_UFENCE_WAIT_ABSTIME)
+#define VALID_FLAGS (DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP | \
+ DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
#define MAX_OP DRM_XE_UFENCE_WAIT_LTE
static long to_jiffies_timeout(struct xe_device *xe,
- * Save the timeout to an u64 variable because nsecs_to_jiffies
- * might return a value that overflows s32 variable.
+ * Save the timeout to a u64 variable because nsecs_to_jiffies
+ * might return a value that overflows an s32 variable.
*/
- if (args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)
+ if (args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
t = drm_timeout_abs_to_jiffies(args->timeout);
else
t = nsecs_to_jiffies(args->timeout);
u64_to_user_ptr(args->instances);
u64 addr = args->addr;
int err;
- bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP;
+ bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP;
long timeout;
ktime_t start;
}
remove_wait_queue(&xe->ufence_wq, &w_wait);
- if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) {
+ if (!(args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)) {
args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
if (args->timeout < 0)
args->timeout = 0;
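The restart path above has to shrink a relative timeout by the elapsed time; with DRM_XE_UFENCE_WAIT_FLAG_ABSTIME userspace passes a fixed CLOCK_MONOTONIC deadline instead. A sketch, reusing the hypothetical wait struct from the example at the end of this patch, with a deadline one millisecond out:

	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	wait.flags |= DRM_XE_UFENCE_WAIT_FLAG_ABSTIME;
	wait.timeout = (__s64)ts.tv_sec * 1000000000ll + ts.tv_nsec + 1000000;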
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
-#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
-#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
-#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (0x1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (0x1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (0x1 << 2)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (0x1 << 3)
/** @flags: Flags */
__u32 flags;
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
-#define DRM_XE_SYNC_SYNCOBJ 0x0
-#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1
-#define DRM_XE_SYNC_DMA_BUF 0x2
-#define DRM_XE_SYNC_USER_FENCE 0x3
-#define DRM_XE_SYNC_SIGNAL 0x10
+#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0
+#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2
+#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3
+#define DRM_XE_SYNC_FLAG_SIGNAL 0x10
__u32 flags;
/** @pad: MBZ */
/** @op: wait operation (type of comparison) */
__u16 op;
-#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
-#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
+#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
/** @flags: wait flags */
__u16 flags;
__u64 mask;
/**
* @timeout: how long to wait before bailing, value in nanoseconds.
- * Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
+ * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
* it contains timeout expressed in nanoseconds to wait (fence will
* expire at now() + timeout).
- * When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait
+ * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout) wait
* will end at timeout (uses system MONOTONIC_CLOCK).
- * Passing negative timeout leads to neverending wait.
+ * Passing a negative timeout leads to a never-ending wait.
*
/**
* @num_engines: number of engine instances to wait on, must be zero
- * when DRM_XE_UFENCE_WAIT_SOFT_OP set
+ * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 num_engines;
/**
* @instances: user pointer to array of drm_xe_engine_class_instance to
- * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
+ * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 instances;
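Putting the renamed wait flags to use, a minimal sketch of a soft-op wait with a relative timeout. DRM_XE_UFENCE_WAIT_EQ, DRM_XE_UFENCE_WAIT_U64 and DRM_IOCTL_XE_WAIT_USER_FENCE are assumed from the surrounding uAPI header (not shown in this patch), and fence_gpu_addr is hypothetical:

	struct drm_xe_wait_user_fence wait = {
		.addr = fence_gpu_addr,
		.op = DRM_XE_UFENCE_WAIT_EQ,
		.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP,
		.value = 1,
		.mask = DRM_XE_UFENCE_WAIT_U64,	/* compare all 64 bits */
		.timeout = 1000000,		/* relative: 1 ms from now */
		/* num_engines and instances stay zero with SOFT_OP set */
	};

	int err = drmIoctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);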