if (XE_IOCTL_ERR(xe, e->vm && xe_vm_in_compute_mode(e->vm) !=
!!(e->flags & ENGINE_FLAG_COMPUTE_MODE))) {
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto put_engine;
}
fallthrough;
case DRM_XE_MMIO_8BIT: /* TODO */
case DRM_XE_MMIO_16BIT: /* TODO */
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
goto exit;
}
}
fallthrough;
case DRM_XE_MMIO_8BIT: /* TODO */
case DRM_XE_MMIO_16BIT: /* TODO */
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
}
}
switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
case DRM_XE_SYNC_SYNCOBJ:
if (XE_IOCTL_ERR(xe, no_dma_fences && signal))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (XE_IOCTL_ERR(xe, upper_32_bits(sync_in.addr)))
return -EINVAL;
case DRM_XE_SYNC_TIMELINE_SYNCOBJ:
if (XE_IOCTL_ERR(xe, no_dma_fences && signal))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (XE_IOCTL_ERR(xe, upper_32_bits(sync_in.addr)))
return -EINVAL;
case DRM_XE_SYNC_USER_FENCE:
if (XE_IOCTL_ERR(xe, !signal))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (XE_IOCTL_ERR(xe, sync_in.addr & 0x7))
return -EINVAL;
return -EINVAL;
if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
vm->async_ops.error_capture.mm = current->mm;
vm->async_ops.error_capture.addr = value;
if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
err = -EINVAL;
if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto put_engine;
}
if (XE_IOCTL_ERR(xe, !vm->async_ops.error_capture.addr)) {
xe_vm_put(vm);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
addr = vm->async_ops.error_capture.addr;
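
Rationale for the conversion above, seen from the userspace side of the ioctl. This is an illustrative sketch, not part of the patch: ENOTSUPP (524 in include/linux/errno.h) is a kernel-internal value with no userspace errno definition, so if it leaks out of an ioctl the C library cannot translate it, while EOPNOTSUPP (95 on Linux) maps to "Operation not supported". The exact strerror() text for 524 is libc-specific; glibc typically reports an unknown error.

/* Illustrative userspace sketch (not part of the patch). */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* EOPNOTSUPP is a standard errno value userspace can decode. */
	printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));

	/*
	 * 524 is ENOTSUPP inside the kernel; userspace has no symbol for
	 * it, so glibc typically prints something like "Unknown error 524".
	 */
	printf("ENOTSUPP (524): %s\n", strerror(524));

	return 0;
}

checkpatch.pl warns about new uses of ENOTSUPP for the same reason ("ENOTSUPP is not a SUSV4 error code, prefer EOPNOTSUPP"), which is why the TODO MMIO widths and the not-yet-supported sync/bind paths above now return -EOPNOTSUPP.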