adev->gmc.visible_vram_size = vis_vram_limit;
 
        /* Change the size here instead of the init above so only lpfn is affected */
-       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
 #ifdef CONFIG_64BIT
        adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
                                                adev->gmc.visible_vram_size);
 #endif
        DRM_INFO("amdgpu: ttm finalized\n");
 }
 
-/* this should only be called at bootup or when userspace
- * isn't running */
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
+/**
+ * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true when we can use buffer functions.
+ *
+ * Enable/disable use of buffer functions during suspend/resume. This should
+ * only be called at bootup or when userspace isn't running.
+ */
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
-       struct ttm_mem_type_manager *man;
+       struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+       uint64_t size;
 
        if (!adev->mman.initialized)
                return;
 
-       man = &adev->mman.bdev.man[TTM_PL_VRAM];
        /* this just adjusts TTM size idea, which sets lpfn to the correct value */
+       if (enable)
+               size = adev->gmc.real_vram_size;
+       else
+               size = adev->gmc.visible_vram_size;
        man->size = size >> PAGE_SHIFT;
 }
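
The remaining hunks all apply the same caller pattern in each DMA engine's stop/start path: disable the buffer functions before the paging ring is torn down, and re-enable them once the ring is running again. A minimal sketch of that pattern follows; the function names (my_engine_stop/my_engine_start) are illustrative only and not part of this patch.

/* Hypothetical sketch of the caller pattern, not code from the patch itself. */
static void my_engine_stop(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
        /* Stop TTM from using this ring for buffer moves before halting it. */
        if (adev->mman.buffer_funcs_ring == ring)
                amdgpu_ttm_set_buffer_funcs_status(adev, false);

        /* ... halt the ring hardware here ... */
        ring->ready = false;
}

static int my_engine_start(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
        /* ... program and test the ring here ... */

        /* Ring is usable again, so let TTM use it for buffer moves. */
        if (adev->mman.buffer_funcs_ring == ring)
                amdgpu_ttm_set_buffer_funcs_status(adev, true);

        return 0;
}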
 
 
 
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+                                       bool enable);
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
 
 
        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+               amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 
 
        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+               amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 
 
        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+               amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 
 
        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+               amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
        }
 
 
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
                ring->ready = false;
        }
 }
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;