	wake_pending_user_interrupt_threads(interrupt);
}
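+/*
+ * force_complete_cs() - force-complete all CSs on the device's mirror list.
+ *
+ * Walk the CS mirror list and complete each in-flight fence with -EIO, so
+ * that any thread blocked on one of these fences is released with an error.
+ */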
+static void force_complete_cs(struct hl_device *hdev)
+{
+	struct hl_cs *cs;
+
+	spin_lock(&hdev->cs_mirror_lock);
+
+	list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
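+		/* Set the fence error before completing, so woken waiters observe -EIO */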
+		cs->fence->error = -EIO;
+		complete_all(&cs->fence->completion);
+	}
+
+	spin_unlock(&hdev->cs_mirror_lock);
+}
+
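+/*
+ * hl_abort_waitings_for_completion() - wake up all threads that are waiting
+ * on the device for the completion of a CS, a multi-CS or a user interrupt.
+ */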
+void hl_abort_waitings_for_completion(struct hl_device *hdev)
+{
+	force_complete_cs(hdev);
+	force_complete_multi_cs(hdev);
+	hl_release_pending_user_interrupts(hdev);
+}
+
static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
+	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	u32 flags = args->in.flags;
	int rc;

-	/* If the device is not operational, no point in waiting for any command submission or
-	 * user interrupt
+	/* If the device is not operational, or if an error has happened and the user should
+	 * release the device, there is no point in waiting for any command submission or user
+	 * interrupt.
	 */
-	if (!hl_device_operational(hpriv->hdev, NULL))
+	if (!hl_device_operational(hdev, NULL) || hdev->reset_info.watchdog_active)
		return -EBUSY;

	if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
void hl_dec_ctx_fini(struct hl_ctx *ctx);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
+void hl_abort_waitings_for_completion(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);