        struct amdgpu_bo *bo;
        int ret;
 
-       if (dma_buf->ops != &amdgpu_dmabuf_ops)
-               /* Can't handle non-graphics buffers */
-               return -EINVAL;
-
-       obj = dma_buf->priv;
-       if (drm_to_adev(obj->dev) != adev)
-               /* Can't handle buffers from other devices */
-               return -EINVAL;
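+       /* Go through the PRIME import path so any DMABuf can be
+        * handled, not only buffers exported by this device.
+        */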
+       obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        bo = gem_to_amdgpu_bo(obj);
        if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
-                                   AMDGPU_GEM_DOMAIN_GTT)))
+                                   AMDGPU_GEM_DOMAIN_GTT))) {
                /* Only VRAM and GTT BOs are supported */
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_put_obj;
+       }
 
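+       /* Wrap the BO in a kgd_mem structure for KFD bookkeeping */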
        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-       if (!*mem)
-               return -ENOMEM;
+       if (!*mem) {
+               ret = -ENOMEM;
+               goto err_put_obj;
+       }
 
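+       /* Let the caller's DRM file (drm_priv) mmap the BO */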
        ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
-       if (ret) {
-               kfree(*mem);
-               return ret;
-       }
+       if (ret)
+               goto err_free_mem;
 
        if (size)
                *size = amdgpu_bo_size(bo);
 
        (*mem)->alloc_flags =
                ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
                | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
                | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-       drm_gem_object_get(&bo->tbo.base);
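+       /* Hold a dma-buf reference for the lifetime of the kgd_mem so
+        * later attachments can reuse it instead of re-exporting the BO.
+        */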
+       get_dma_buf(dma_buf);
+       (*mem)->dmabuf = dma_buf;
        (*mem)->bo = bo;
        (*mem)->va = va;
        (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
        (*mem)->is_imported = true;
 
        return 0;
+
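+       /* Error unwind: undo the steps above in reverse order */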
+err_free_mem:
+       kfree(*mem);
+err_put_obj:
+       drm_gem_object_put(obj);
+       return ret;
 }
 
 /* Evict a userptr BO by stopping the queues if necessary