bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-                             struct amdgpu_job* job);
-int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  kfd.reset_work);
 
-       amdgpu_device_gpu_recover_imp(adev, NULL);
+       amdgpu_device_gpu_recover(adev, NULL);
 }
 
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 
  * Returns 0 for success or an error on failure.
  */
 
-int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
 
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);
 
-       amdgpu_device_gpu_recover_imp(adev, NULL);
+       amdgpu_device_gpu_recover(adev, NULL);
 }
 
 #endif
 
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
        if (amdgpu_device_should_recover_gpu(ring->adev)) {
-               r = amdgpu_device_gpu_recover_imp(ring->adev, job);
+               r = amdgpu_device_gpu_recover(ring->adev, job);
                if (r)
                        DRM_ERROR("GPU Recovery Failed: %d\n", r);
        } else {
 
        }
 
        if (amdgpu_device_should_recover_gpu(ras->adev))
-               amdgpu_device_gpu_recover_imp(ras->adev, NULL);
+               amdgpu_device_gpu_recover(ras->adev, NULL);
        atomic_set(&ras->in_recovery, 0);
 }
 
 
        if (amdgpu_device_should_recover_gpu(adev)
                && (!amdgpu_device_has_job_running(adev) ||
                adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
-               amdgpu_device_gpu_recover_imp(adev, NULL);
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
 
                adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
-               amdgpu_device_gpu_recover_imp(adev, NULL);
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
 
 
        /* Trigger recovery due to world switch failure */
        if (amdgpu_device_should_recover_gpu(adev))
-               amdgpu_device_gpu_recover_imp(adev, NULL);
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
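
Every call site touched above follows the same guarded idiom: test
amdgpu_device_should_recover_gpu() first, then invoke the renamed
amdgpu_device_gpu_recover(), passing NULL when no specific job triggered
the reset. Below is a minimal sketch of that idiom, assuming only the two
functions visible in the hunks; the wrapper name example_trigger_recovery
is hypothetical and not part of this patch.

/*
 * Hedged sketch of the recovery idiom shared by the hunks above.
 * example_trigger_recovery() is a hypothetical name for illustration.
 */
static void example_trigger_recovery(struct amdgpu_device *adev,
                                     struct amdgpu_job *job)
{
        int r;

        /* Policy gate: skip the reset when recovery is disabled. */
        if (!amdgpu_device_should_recover_gpu(adev))
                return;

        /* job may be NULL when no specific job triggered the reset. */
        r = amdgpu_device_gpu_recover(adev, job);
        if (r)
                DRM_ERROR("GPU Recovery Failed: %d\n", r);
}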