}
        return err;
 }
+
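+/**
+ * amdgpu_sdma_unset_buffer_funcs_helper - disable TTM buffer funcs backed by SDMA
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Checks each SDMA instance's gfx ring (and page ring when present) and,
+ * if one of them is the current TTM buffer funcs ring, marks the buffer
+ * funcs as disabled. Called by the per-ASIC stop routines before the
+ * rings are torn down.
+ */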
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *sdma;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (adev->sdma.has_page_queue) {
+                       sdma = &adev->sdma.instance[i].page;
+                       if (adev->mman.buffer_funcs_ring == sdma) {
+                               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                               break;
+                       }
+               }
+               sdma = &adev->sdma.instance[i].ring;
+               if (adev->mman.buffer_funcs_ring == sdma) {
+                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                       break;
+               }
+       }
+}
 
         char *fw_name, u32 instance, bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
         bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
 #endif
 
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 
  */
 static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
-       int i, unset = 0;
+       int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               sdma[i] = &adev->sdma.instance[i].ring;
-
-               if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       unset = 1;
-               }
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
  */
 static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
        int i;
-       bool unset = false;
 
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               sdma[i] = &adev->sdma.instance[i].page;
-
-               if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-                       (!unset)) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       unset = true;
-               }
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                                        RB_ENABLE, 0);
 
  */
 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 
  */
 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
-       struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
-       struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1) ||
-           (adev->mman.buffer_funcs_ring == sdma2) ||
-           (adev->mman.buffer_funcs_ring == sdma3))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 
  */
 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
        }
-
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
 
        u32 rb_cntl;
        unsigned i;
 
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                /* dma0 */
                rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~DMA_RB_ENABLE;
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
        }
 }