}
}
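+/*
+ * Cancel reset work that is queued but has not started executing yet:
+ * the debugfs-triggered reset, the KFD hang-detection reset, the SR-IOV
+ * FLR work and the RAS recovery work. cancel_work() only removes pending
+ * work items; it does not wait for a handler that is already running.
+ */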
+static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+#if defined(CONFIG_DEBUG_FS)
+ if (!amdgpu_sriov_vf(adev))
+ cancel_work(&adev->reset_work);
+#endif
+
+ if (adev->kfd.dev)
+ cancel_work(&adev->kfd.reset_work);
+
+ if (amdgpu_sriov_vf(adev))
+ cancel_work(&adev->virt.flr_work);
+
+ if (con && adev->ras_enabled)
+ cancel_work(&con->recovery_work);
+}
+
/**
* amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler
*
r, adev_to_drm(tmp_adev)->unique);
tmp_adev->asic_reset_res = r;
}
+
+ /*
+ * Drop all pending non-scheduler resets. Scheduler resets
+ * were already dropped during drm_sched_stop.
+ */
+ amdgpu_device_stop_pending_resets(tmp_adev);
}
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));