if (ret)
                goto err_release;
 
+       mutex_init(&shmem->pages_lock);
+       mutex_init(&shmem->vmap_lock);
        INIT_LIST_HEAD(&shmem->madv_list);
 
        if (!private) {
 {
        struct drm_gem_object *obj = &shmem->base;
 
+       drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+
        if (obj->import_attach) {
                drm_prime_gem_destroy(obj, shmem->sgt);
        } else {
-               dma_resv_lock(shmem->base.resv, NULL);
-
-               drm_WARN_ON(obj->dev, shmem->vmap_use_count);
-
                if (shmem->sgt) {
                        dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
                                          DMA_BIDIRECTIONAL, 0);
                }
                if (shmem->pages)
                        drm_gem_shmem_put_pages(shmem);
-
-               drm_WARN_ON(obj->dev, shmem->pages_use_count);
-
-               dma_resv_unlock(shmem->base.resv);
        }
 
+       drm_WARN_ON(obj->dev, shmem->pages_use_count);
+
        drm_gem_object_release(obj);
+       mutex_destroy(&shmem->pages_lock);
+       mutex_destroy(&shmem->vmap_lock);
        kfree(shmem);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
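
Everything below follows the locked/unlocked split that the two new mutexes enable: the exported entry point takes the mutex, and a *_locked variant assumes the caller already holds it. A minimal sketch of the pattern, with made-up names:

        #include <linux/lockdep.h>
        #include <linux/mutex.h>

        struct my_obj {                         /* hypothetical object */
                struct mutex lock;
                unsigned int use_count;
        };

        static void my_put_locked(struct my_obj *o)
        {
                lockdep_assert_held(&o->lock);  /* caller holds o->lock */
                o->use_count--;
        }

        static void my_put(struct my_obj *o)
        {
                mutex_lock(&o->lock);
                my_put_locked(o);
                mutex_unlock(&o->lock);
        }
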
 
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
        struct page **pages;
 }
 
 /*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
  * @shmem: shmem GEM object
  *
- * This function decreases the use count and puts the backing pages when use drops to zero.
+ * This function makes sure that backing pages exist for the shmem GEM object
+ * and increases the use count.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
+       int ret;
 
-       dma_resv_assert_held(shmem->base.resv);
+       drm_WARN_ON(obj->dev, obj->import_attach);
+
+       ret = mutex_lock_interruptible(&shmem->pages_lock);
+       if (ret)
+               return ret;
+       ret = drm_gem_shmem_get_pages_locked(shmem);
+       mutex_unlock(&shmem->pages_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages);
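
Because the wrapper takes pages_lock with mutex_lock_interruptible(), drm_gem_shmem_get_pages() can fail with -EINTR when a signal interrupts the wait, on top of whatever the _locked helper returns. A hypothetical caller simply propagates the error:

        static int my_driver_use_pages(struct drm_gem_shmem_object *shmem)
        {
                int ret;

                ret = drm_gem_shmem_get_pages(shmem);
                if (ret)        /* -EINTR from the lock, or e.g. -ENOMEM */
                        return ret;

                /* ... shmem->pages is now valid ... */
                return 0;
        }
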
+
+static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+       struct drm_gem_object *obj = &shmem->base;
 
        if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                return;
                          shmem->pages_mark_accessed_on_put);
        shmem->pages = NULL;
 }
+
+/*
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when use drops to zero.
+ */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+{
+       mutex_lock(&shmem->pages_lock);
+       drm_gem_shmem_put_pages_locked(shmem);
+       mutex_unlock(&shmem->pages_lock);
+}
 EXPORT_SYMBOL(drm_gem_shmem_put_pages);
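
The pair is reference counted: each successful drm_gem_shmem_get_pages() must be balanced by one drm_gem_shmem_put_pages(), and the backing pages are only released once pages_use_count reaches zero, so nested users are cheap:

        ret = drm_gem_shmem_get_pages(shmem);   /* pages_use_count++ */
        if (ret)
                return ret;

        /* ... access shmem->pages ... */

        drm_gem_shmem_put_pages(shmem);         /* pages_use_count--, freed at 0 */
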
 
 /**
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       dma_resv_assert_held(shmem->base.resv);
-
        drm_WARN_ON(obj->dev, obj->import_attach);
 
        return drm_gem_shmem_get_pages(shmem);
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       dma_resv_assert_held(shmem->base.resv);
-
        drm_WARN_ON(obj->dev, obj->import_attach);
 
        drm_gem_shmem_put_pages(shmem);
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
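
pin/unpin are thin wrappers around the get/put pair for non-imported objects. A driver would typically pin before handing the pages to the device and unpin on teardown; a sketch with made-up driver functions:

        static int my_driver_bind(struct drm_gem_shmem_object *shmem)
        {
                int ret;

                ret = drm_gem_shmem_pin(shmem);         /* keeps pages resident */
                if (ret)
                        return ret;

                /* ... map shmem->pages into the device MMU ... */
                return 0;
        }

        static void my_driver_unbind(struct drm_gem_shmem_object *shmem)
        {
                /* ... tear down the device mapping ... */
                drm_gem_shmem_unpin(shmem);
        }
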
 
-/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- *       store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
- *
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-                      struct iosys_map *map)
+static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+                                    struct iosys_map *map)
 {
        struct drm_gem_object *obj = &shmem->base;
        int ret = 0;
        } else {
                pgprot_t prot = PAGE_KERNEL;
 
-               dma_resv_assert_held(shmem->base.resv);
-
                if (shmem->vmap_use_count++ > 0) {
                        iosys_map_set_vaddr(map, shmem->vaddr);
                        return 0;
 
        return ret;
 }
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
 
 /*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ *       store.
  *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
  *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-                         struct iosys_map *map)
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+                      struct iosys_map *map)
+{
+       int ret;
+
+       ret = mutex_lock_interruptible(&shmem->vmap_lock);
+       if (ret)
+               return ret;
+       ret = drm_gem_shmem_vmap_locked(shmem, map);
+       mutex_unlock(&shmem->vmap_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
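
As with get_pages, drm_gem_shmem_vmap() can now return -EINTR from the interruptible lock. A typical caller pairs it with the iosys_map helpers from <linux/iosys-map.h>, e.g. to fill the buffer from the CPU:

        struct iosys_map map;
        int ret;

        ret = drm_gem_shmem_vmap(shmem, &map);
        if (ret)
                return ret;

        iosys_map_memcpy_to(&map, 0, data, size);  /* CPU write through the mapping */

        drm_gem_shmem_vunmap(shmem, &map);
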
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+                                       struct iosys_map *map)
 {
        struct drm_gem_object *obj = &shmem->base;
 
        if (obj->import_attach) {
                dma_buf_vunmap(obj->import_attach->dmabuf, map);
        } else {
-               dma_resv_assert_held(shmem->base.resv);
-
                if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
                        return;
 
 
        shmem->vaddr = NULL;
 }
+
+/*
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
+ *
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
+ *
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+                         struct iosys_map *map)
+{
+       mutex_lock(&shmem->vmap_lock);
+       drm_gem_shmem_vunmap_locked(shmem, map);
+       mutex_unlock(&shmem->vmap_lock);
+}
 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
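
Assuming the elided vmap body still calls drm_gem_shmem_get_pages() for non-imported objects, as it did before the reservation-lock conversion, the two mutexes nest in a fixed order on the vmap path:

        /*
         * Implied lock nesting:
         *
         *   mutex_lock(&shmem->vmap_lock);        <- drm_gem_shmem_vmap()
         *       mutex_lock(&shmem->pages_lock);   <- via drm_gem_shmem_get_pages()
         *       mutex_unlock(&shmem->pages_lock);
         *   mutex_unlock(&shmem->vmap_lock);
         *
         * so callers must never acquire vmap_lock while holding pages_lock.
         */
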
 
 static int
  */
 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
 {
-       dma_resv_assert_held(shmem->base.resv);
+       mutex_lock(&shmem->pages_lock);
 
        if (shmem->madv >= 0)
                shmem->madv = madv;
 
        madv = shmem->madv;
 
+       mutex_unlock(&shmem->pages_lock);
+
        return (madv >= 0);
 }
 EXPORT_SYMBOL(drm_gem_shmem_madvise);
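
drm_gem_shmem_madvise() serializes against the other pages_lock users and returns whether the object still has backing storage (madv >= 0). A driver madvise ioctl might use it along these lines; the args layout and helper are hypothetical:

        args->retained = drm_gem_shmem_madvise(shmem, args->madv);

        if (args->retained && args->madv == MY_MADV_DONTNEED)
                my_driver_add_to_shrinker_list(shmem);  /* made-up helper */
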
 
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
        struct drm_device *dev = obj->dev;
 
-       dma_resv_assert_held(shmem->base.resv);
-
        drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
        dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
        kfree(shmem->sgt);
        shmem->sgt = NULL;
 
-       drm_gem_shmem_put_pages(shmem);
+       drm_gem_shmem_put_pages_locked(shmem);
 
        shmem->madv = -1;
 
 
        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
 }
+EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
+
+bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+{
+       if (!mutex_trylock(&shmem->pages_lock))
+               return false;
+       drm_gem_shmem_purge_locked(shmem);
+       mutex_unlock(&shmem->pages_lock);
+
+       return true;
+}
 EXPORT_SYMBOL(drm_gem_shmem_purge);
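
The unlocked drm_gem_shmem_purge() deliberately uses mutex_trylock() and returns false instead of blocking, so a shrinker can skip objects whose lock is contended rather than stall the scan. A sketch of such a loop; the list and shrink_control handling are hypothetical:

        unsigned long freed = 0;

        list_for_each_entry(shmem, &my_shrinker_list, madv_list) {
                if (!drm_gem_shmem_is_purgeable(shmem))
                        continue;
                if (drm_gem_shmem_purge(shmem))         /* false if lock contended */
                        freed += shmem->base.size >> PAGE_SHIFT;
                if (freed >= sc->nr_to_scan)
                        break;
        }
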
 
 /**
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
-       dma_resv_lock(shmem->base.resv, NULL);
+       mutex_lock(&shmem->pages_lock);
 
        if (page_offset >= num_pages ||
            drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
                ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
        }
 
-       dma_resv_unlock(shmem->base.resv);
+       mutex_unlock(&shmem->pages_lock);
 
        return ret;
 }
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       dma_resv_lock(shmem->base.resv, NULL);
+       mutex_lock(&shmem->pages_lock);
 
        /*
         * We should have already pinned the pages when the buffer was first
        if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                shmem->pages_use_count++;
 
-       dma_resv_unlock(shmem->base.resv);
+       mutex_unlock(&shmem->pages_lock);
 
        drm_gem_vm_open(vma);
 }
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-       dma_resv_lock(shmem->base.resv, NULL);
        drm_gem_shmem_put_pages(shmem);
-       dma_resv_unlock(shmem->base.resv);
-
        drm_gem_vm_close(vma);
 }
 
                return dma_buf_mmap(obj->dma_buf, vma, 0);
        }
 
-       dma_resv_lock(shmem->base.resv, NULL);
        ret = drm_gem_shmem_get_pages(shmem);
-       dma_resv_unlock(shmem->base.resv);
-
        if (ret)
                return ret;
 
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       ret = drm_gem_shmem_get_pages(shmem);
+       ret = drm_gem_shmem_get_pages_locked(shmem);
        if (ret)
                return ERR_PTR(ret);
 
        sg_free_table(sgt);
        kfree(sgt);
 err_put_pages:
-       drm_gem_shmem_put_pages(shmem);
+       drm_gem_shmem_put_pages_locked(shmem);
        return ERR_PTR(ret);
 }
 
        int ret;
        struct sg_table *sgt;
 
-       ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
+       ret = mutex_lock_interruptible(&shmem->pages_lock);
        if (ret)
                return ERR_PTR(ret);
        sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
-       dma_resv_unlock(shmem->base.resv);
+       mutex_unlock(&shmem->pages_lock);
 
        return sgt;
 }
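
drm_gem_shmem_get_pages_sgt() keeps the interruptible-lock behaviour and reports failure as an ERR_PTR, so a caller that needs a DMA-mapped scatter-gather table checks it with IS_ERR():

        struct sg_table *sgt;

        sgt = drm_gem_shmem_get_pages_sgt(shmem);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* sgt is cached in shmem->sgt and stays valid until free or purge */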