        return 0;
 }
 
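+/**
+ * sdma_v5_2_soft_reset - soft reset the async dma engines
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Assert and then release GRBM soft reset on each SDMA instance so
+ * the engines come back up in a clean state.
+ * Returns 0 for success.
+ */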
+static int sdma_v5_2_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 grbm_soft_reset;
+       u32 tmp;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
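+               /* build the reset mask for this instance: the
+                * SOFT_RESET_SDMA0 field shifted left by the instance
+                * index selects the per-instance reset bit
+                */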
+               grbm_soft_reset = REG_SET_FIELD(0,
+                                               GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+                                               1);
+               grbm_soft_reset <<= i;
+
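+               /* assert soft reset for this SDMA instance */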
+               tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+               tmp |= grbm_soft_reset;
+               DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
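+               /* read back so the write is posted before the delay */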
+               tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+               udelay(50);
+
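+               /* release the soft reset */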
+               tmp &= ~grbm_soft_reset;
+               WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+               tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+               udelay(50);
+       }
+
+       return 0;
+}
+
 /**
  * sdma_v5_2_start - setup and start the async dma engines
  *
                        msleep(1000);
        }
 
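+       /* soft reset the SDMA engines so they start from a clean state */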
+       sdma_v5_2_soft_reset(adev);
        /* unhalt the MEs */
        sdma_v5_2_enable(adev, true);
        /* enable sdma ring preemption */
        return -ETIMEDOUT;
 }
 
-static int sdma_v5_2_soft_reset(void *handle)
-{
-       /* todo */
-
-       return 0;
-}
-
 static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
 {
        int i, r = 0;