drm/amdgpu: Rename amdgpu_device_gpu_recover_imp back to amdgpu_device_gpu_recover
author Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Tue, 17 May 2022 18:27:49 +0000 (14:27 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 10 Jun 2022 19:26:12 +0000 (15:26 -0400)
We removed the wrapper that queued the recovery function onto the reset
domain queue; since that wrapper was the previous user of this name, the
implementation can take the name back.
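
For context, a minimal sketch of the calling pattern this rename completes.
This is an illustration only, assuming the reset-domain helpers introduced
earlier in this series (amdgpu_reset_domain_schedule() and adev->reset_domain)
and mirroring the amdgpu_amdkfd_reset_work handler in the hunk below;
kfd_request_gpu_reset() is a hypothetical caller name used for the example,
not an upstream function:

  #include "amdgpu.h"
  #include "amdgpu_reset.h"

  /* The work handler runs on the reset-domain workqueue and performs the
   * recovery directly, now again under the plain name.
   */
  static void amdgpu_amdkfd_reset_work(struct work_struct *work)
  {
          struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                    kfd.reset_work);

          amdgpu_device_gpu_recover(adev, NULL);
  }

  /* A caller that wants a reset no longer goes through a queueing wrapper;
   * it schedules its own work item on the device's reset domain.
   * (Hypothetical caller, shown for illustration.)
   */
  static void kfd_request_gpu_reset(struct amdgpu_device *adev)
  {
          if (!amdgpu_reset_domain_schedule(adev->reset_domain,
                                            &adev->kfd.reset_work))
                  dev_warn(adev->dev, "reset request already queued\n");
  }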

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c

index ddb36c33b4bce4438c74810da94e84ce31294c9e..fb9399a999aef30aa8e75543362fe9047792e79d 100644 (file)
@@ -1254,7 +1254,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job* job);
-int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
index a23abc0e86e7223bf864acbe1de1c3d297603855..513c57f839d8171c7dff1a8e6299cb4c1cd04b76 100644 (file)
@@ -129,7 +129,7 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  kfd.reset_work);
 
-       amdgpu_device_gpu_recover_imp(adev, NULL);
+       amdgpu_device_gpu_recover(adev, NULL);
 }
 
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
index 2d490941e727530545aae5decece05d469297f7f..2d5a623598b81972b613fa28833168867ed9b5a3 100644 (file)
@@ -5076,7 +5076,7 @@ retry:
  * Returns 0 for success or an error on failure.
  */
 
-int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
index b0498ffcf7c33425fce047589d4b4834b9200044..957437a5558c11ed0887120ab0167a33f5caec1f 100644 (file)
@@ -819,7 +819,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);
 
-       amdgpu_device_gpu_recover_imp(adev, NULL);
+       amdgpu_device_gpu_recover(adev, NULL);
 }
 
 #endif
index 67f66f2f1809cb8597697163c050f382832c557f..26ede765eed89ad90cf485aee991a7d4d3e423a3 100644 (file)
@@ -64,7 +64,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
        if (amdgpu_device_should_recover_gpu(ring->adev)) {
-               r = amdgpu_device_gpu_recover_imp(ring->adev, job);
+               r = amdgpu_device_gpu_recover(ring->adev, job);
                if (r)
                        DRM_ERROR("GPU Recovery Failed: %d\n", r);
        } else {
index b3b5ebbae82feb8d9cde6b58b5fded990c5b3956..285534bfc084b73470f59e7e69e77088ab93fd70 100644 (file)
@@ -1939,7 +1939,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
        }
 
        if (amdgpu_device_should_recover_gpu(ras->adev))
-               amdgpu_device_gpu_recover_imp(ras->adev, NULL);
+               amdgpu_device_gpu_recover(ras->adev, NULL);
        atomic_set(&ras->in_recovery, 0);
 }
 
index b81acf59870c5be2a2df32e9a5bbd90f2e41206a..7ec5b5cf4bb94395476d69de12b60db5dcd333b8 100644 (file)
@@ -284,7 +284,7 @@ flr_done:
        if (amdgpu_device_should_recover_gpu(adev)
                && (!amdgpu_device_has_job_running(adev) ||
                adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
-               amdgpu_device_gpu_recover_imp(adev, NULL);
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
index 22c10b97ea81f7326b3fa5558aaa10aa9ef63e05..e18b75c8fde66cfdebb5e952486b2a8bfeb05e55 100644 (file)
@@ -311,7 +311,7 @@ flr_done:
                adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
-               amdgpu_device_gpu_recover_imp(adev, NULL);
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
index 7b63d30b9b79eedd740e3698b6f1e12a4a70806f..c5016a9263317b9137bc49e3c77d0a777dec52b6 100644 (file)
@@ -523,7 +523,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
 
        /* Trigger recovery due to world switch failure */
        if (amdgpu_device_should_recover_gpu(adev))
-               amdgpu_device_gpu_recover_imp(adev, NULL);
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,