{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+       amdgpu_ttm_backend_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
        if (gtt->usertask)
                put_task_struct(gtt->usertask);
 
 #if IS_ENABLED(CONFIG_AGP)
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        if (drm->agp.bridge) {
+               ttm_agp_unbind(ttm);
                ttm_tt_destroy_common(bdev, ttm);
                ttm_agp_destroy(ttm);
                return;
 
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
        if (ttm) {
+               nouveau_sgdma_unbind(bdev, ttm);
                ttm_tt_destroy_common(bdev, ttm);
                ttm_dma_tt_fini(&nvbe->ttm);
                kfree(nvbe);
 
 {
        struct radeon_ttm_tt *gtt = (void *)ttm;
 
+       radeon_ttm_backend_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
 
        ttm_dma_tt_fini(&gtt->ttm);
        struct radeon_device *rdev = radeon_get_rdev(bdev);
 
        if (rdev->flags & RADEON_IS_AGP) {
+               ttm_agp_unbind(ttm);
                ttm_tt_destroy_common(bdev, ttm);
                ttm_agp_destroy(ttm);
                return;
 
        if (bo->ttm == NULL)
                return;
 
-       ttm_bo_tt_unbind(bo);
        ttm_tt_destroy(bo->bdev, bo->ttm);
        bo->ttm = NULL;
 }
 
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
+       vmw_ttm_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)