         * fd_install step out of the driver backend hooks, to make that
         * final step optional for internal users.
         */
-       ret = drm_gem_vmap(buffer->gem, map);
+       ret = drm_gem_vmap_unlocked(buffer->gem, map);
        if (ret)
                return ret;
 
 {
        struct iosys_map *map = &buffer->map;
 
-       drm_gem_vunmap(buffer->gem, map);
+       drm_gem_vunmap_unlocked(buffer->gem, map);
 }
 EXPORT_SYMBOL(drm_client_buffer_vunmap);
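
With the core GEM helpers now asserting the reservation lock (see the drm_gem.c hunks below), drm_client.c switches its buffer mapping to the new _unlocked wrappers. The drm_client API itself is unchanged; a minimal consumer sketch, assuming a hypothetical fill helper on top of the existing drm_client_buffer_vmap()/drm_client_buffer_vunmap() entry points:

static int example_fill(struct drm_client_buffer *buffer, u8 value)
{
        struct iosys_map map;
        int ret;

        /* Now takes the GEM object's reservation lock internally. */
        ret = drm_client_buffer_vmap(buffer, &map);
        if (ret)
                return ret;

        /* iosys_map_memset() handles both system- and I/O-memory maps. */
        iosys_map_memset(&map, 0, value,
                         buffer->fb->height * buffer->fb->pitches[0]);

        drm_client_buffer_vunmap(buffer);
        return 0;
}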
 
 
 {
        int ret;
 
+       dma_resv_assert_held(obj->resv);
+
        if (!obj->funcs->vmap)
                return -EOPNOTSUPP;
 
 
 void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
 {
+       dma_resv_assert_held(obj->resv);
+
        if (iosys_map_is_null(map))
                return;
 
 }
 EXPORT_SYMBOL(drm_gem_vunmap);
 
+int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+{
+       int ret;
+
+       dma_resv_lock(obj->resv, NULL);
+       ret = drm_gem_vmap(obj, map);
+       dma_resv_unlock(obj->resv);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+
+void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+{
+       dma_resv_lock(obj->resv, NULL);
+       drm_gem_vunmap(obj, map);
+       dma_resv_unlock(obj->resv);
+}
+EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
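
Taken together, the drm_gem.c hunks make the locking contract explicit: drm_gem_vmap() and drm_gem_vunmap() may only be called with obj->resv held, and the new wrappers serve callers that do not hold it. A minimal sketch of the two calling conventions (helper names hypothetical; the memcpy() assumes a system-memory mapping, not I/O memory):

/* No reservation held: the wrapper takes obj->resv around the map. */
static int peek_bo(struct drm_gem_object *obj, void *dst, size_t len)
{
        struct iosys_map map;
        int ret;

        ret = drm_gem_vmap_unlocked(obj, &map);
        if (ret)
                return ret;

        memcpy(dst, map.vaddr, len);

        drm_gem_vunmap_unlocked(obj, &map);
        return 0;
}

/* Already under dma_resv_lock(obj->resv, ...): call the core helpers
 * directly; going through the _unlocked wrappers here would deadlock
 * on the recursive lock attempt. */
static int peek_bo_locked(struct drm_gem_object *obj, void *dst, size_t len)
{
        struct iosys_map map;
        int ret;

        ret = drm_gem_vmap(obj, &map);
        if (ret)
                return ret;

        memcpy(dst, map.vaddr, len);
        drm_gem_vunmap(obj, &map);
        return 0;
}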
+
 /**
  * drm_gem_lock_reservations - Sets up the ww context and acquires
  * the lock on an array of GEM objects.
 
 
        if (gem_obj->import_attach) {
                if (dma_obj->vaddr)
-                       dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
+                       dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
                drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
        } else if (dma_obj->vaddr) {
                if (dma_obj->map_noncoherent)
        struct iosys_map map;
        int ret;
 
-       ret = dma_buf_vmap(attach->dmabuf, &map);
+       ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
        if (ret) {
                DRM_ERROR("Failed to vmap PRIME buffer\n");
                return ERR_PTR(ret);
 
        obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
-               dma_buf_vunmap(attach->dmabuf, &map);
+               dma_buf_vunmap_unlocked(attach->dmabuf, &map);
                return obj;
        }
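
The DMA GEM helper sits on the importer side of PRIME, and these paths run outside of any dma-buf callback, so they move to the unlocked dma-buf wrappers, which take the exporter's reservation lock themselves. The rule of thumb, as a short sketch (caller name hypothetical, assuming <linux/dma-buf.h>):

static int importer_peek(struct dma_buf *dmabuf)
{
        struct iosys_map map;
        int ret;

        /* Outside dma-buf ops: the _unlocked variant locks the resv. */
        ret = dma_buf_vmap_unlocked(dmabuf, &map);
        if (ret)
                return ret;

        /* ... read through the mapping ... */

        dma_buf_vunmap_unlocked(dmabuf, &map);
        return 0;
}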
 
 
                        ret = -EINVAL;
                        goto err_drm_gem_vunmap;
                }
-               ret = drm_gem_vmap(obj, &map[i]);
+               ret = drm_gem_vmap_unlocked(obj, &map[i]);
                if (ret)
                        goto err_drm_gem_vunmap;
        }
                obj = drm_gem_fb_get_obj(fb, i);
                if (!obj)
                        continue;
-               drm_gem_vunmap(obj, &map[i]);
+               drm_gem_vunmap_unlocked(obj, &map[i]);
        }
        return ret;
 }
                        continue;
                if (iosys_map_is_null(&map[i]))
                        continue;
-               drm_gem_vunmap(obj, &map[i]);
+               drm_gem_vunmap_unlocked(obj, &map[i]);
        }
 }
 EXPORT_SYMBOL(drm_gem_fb_vunmap);
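
drm_gem_fb_vmap() and drm_gem_fb_vunmap() walk every plane of a framebuffer, so each per-plane call switches to the _unlocked wrapper and the respective GEM object's reservation lock is taken one plane at a time. Callers are unaffected; a minimal sketch (function name hypothetical):

static int example_access_fb(struct drm_framebuffer *fb)
{
        struct iosys_map map[DRM_FORMAT_MAX_PLANES];
        struct iosys_map data[DRM_FORMAT_MAX_PLANES];
        int ret;

        /* Locks each plane's GEM object internally. */
        ret = drm_gem_fb_vmap(fb, map, data);
        if (ret)
                return ret;

        /* ... access the planes through data[i] ... */

        drm_gem_fb_vunmap(fb, map);
        return 0;
}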
 
                     struct iosys_map *map)
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
-       int ret;
-
-       dma_resv_lock(gem->resv, NULL);
-       ret = ttm_bo_vmap(bo, map);
-       dma_resv_unlock(gem->resv);
 
-       return ret;
+       return ttm_bo_vmap(bo, map);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vmap);
 
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
-       dma_resv_lock(gem->resv, NULL);
        ttm_bo_vunmap(bo, map);
-       dma_resv_unlock(gem->resv);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vunmap);
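
The TTM helper goes the other way: it used to take the lock around ttm_bo_vmap()/ttm_bo_vunmap(), but these functions are only reached through obj->funcs->vmap/vunmap, i.e. from drm_gem_vmap()/drm_gem_vunmap(), which now run with the lock already held, so the local locking is dropped to avoid a recursive acquire. For orientation, how a TTM-based driver typically wires these helpers up (the table name is hypothetical):

static const struct drm_gem_object_funcs example_gem_funcs = {
        /* Reached via drm_gem_vmap()/drm_gem_vunmap() with obj->resv held. */
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
        .mmap = drm_gem_ttm_mmap,
};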
 
 
                } else {
                        buffer_chunk->size = lima_bo_size(bo);
 
-                       ret = drm_gem_shmem_vmap(&bo->base, &map);
+                       ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
                        if (ret) {
                                kvfree(et);
                                goto out;
 
                        memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
 
-                       drm_gem_shmem_vunmap(&bo->base, &map);
+                       drm_gem_vunmap_unlocked(&bo->base.base, &map);
                }
 
                buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
 
                        goto dump_header;
                }
 
-               ret = drm_gem_shmem_vmap(&bo->base, &map);
+               ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
                if (ret) {
                        dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
                        iter.hdr->bomap.valid = 0;
                vaddr = map.vaddr;
                memcpy(iter.data, vaddr, bo->base.base.size);
 
-               drm_gem_shmem_vunmap(&bo->base, &map);
+               drm_gem_vunmap_unlocked(&bo->base.base, &map);
 
                iter.hdr->bomap.valid = cpu_to_le32(1);
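
The lima and panfrost dump paths (and the perfcnt path below) replace the shmem-specific drm_gem_shmem_vmap()/drm_gem_shmem_vunmap() calls, which now likewise expect the reservation lock to be held, with the generic unlocked wrappers. The extra ->base dereference in the arguments falls out of the struct embedding:

/* Why the arguments change from &bo->base to &bo->base.base:
 *
 *   struct lima_bo              { struct drm_gem_shmem_object base; ... };
 *   struct panfrost_gem_object  { struct drm_gem_shmem_object base; ... };
 *   struct drm_gem_shmem_object { struct drm_gem_object base; ... };
 *
 * The shmem helpers took a drm_gem_shmem_object; the generic wrappers
 * take the drm_gem_object embedded one level further down.
 */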
 
 
                goto err_close_bo;
        }
 
-       ret = drm_gem_shmem_vmap(bo, &map);
+       ret = drm_gem_vmap_unlocked(&bo->base, &map);
        if (ret)
                goto err_put_mapping;
        perfcnt->buf = map.vaddr;
        return 0;
 
 err_vunmap:
-       drm_gem_shmem_vunmap(bo, &map);
+       drm_gem_vunmap_unlocked(&bo->base, &map);
 err_put_mapping:
        panfrost_gem_mapping_put(perfcnt->mapping);
 err_close_bo:
                  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
        perfcnt->user = NULL;
-       drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map);
+       drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
        perfcnt->buf = NULL;
        panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
        panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
 
                bo->map_count++;
                goto out;
        }
-       r = ttm_bo_vmap(&bo->tbo, &bo->map);
+
+       r = __qxl_bo_pin(bo);
        if (r)
                return r;
+
+       r = ttm_bo_vmap(&bo->tbo, &bo->map);
+       if (r) {
+               __qxl_bo_unpin(bo);
+               return r;
+       }
        bo->map_count = 1;
 
        /* TODO: Remove kptr in favor of map everywhere. */
        if (r)
                return r;
 
-       r = __qxl_bo_pin(bo);
-       if (r) {
-               qxl_bo_unreserve(bo);
-               return r;
-       }
-
        r = qxl_bo_vmap_locked(bo, map);
        qxl_bo_unreserve(bo);
        return r;
                return;
        bo->kptr = NULL;
        ttm_bo_vunmap(&bo->tbo, &bo->map);
+       __qxl_bo_unpin(bo);
 }
 
 int qxl_bo_vunmap(struct qxl_bo *bo)
                return r;
 
        qxl_bo_vunmap_locked(bo);
-       __qxl_bo_unpin(bo);
        qxl_bo_unreserve(bo);
        return 0;
 }
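
In qxl, pinning moves from the unreserved wrappers into qxl_bo_vmap_locked()/qxl_bo_vunmap_locked(), so pin+map and unmap+unpin each happen under a single reservation hold. The outer wrapper then reduces to reserve, map, unreserve; reconstructed here from the hunks above for readability (the qxl_bo_reserve() call is context not shown in this excerpt):

int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        r = qxl_bo_vmap_locked(bo, map);        /* pins, then maps */
        qxl_bo_unreserve(bo);
        return r;
}

This rebalancing is also what lets the PRIME hooks below call the _locked variants directly: the dma-buf core now invokes .vmap/.vunmap with the reservation lock already held.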
 
        struct qxl_bo *bo = gem_to_qxl_bo(obj);
        int ret;
 
-       ret = qxl_bo_vmap(bo, map);
+       ret = qxl_bo_vmap_locked(bo, map);
        if (ret < 0)
                return ret;
 
 {
        struct qxl_bo *bo = gem_to_qxl_bo(obj);
 
-       qxl_bo_vunmap(bo);
+       qxl_bo_vunmap_locked(bo);
 }
 
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed);
 
+int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+
 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out);
 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);