When the last reference to a TTM BO is dropped, ttm_bo_release() will
acquire the DMA reservation object's wound/wait mutex while trying to
clean up (in ttm_bo_cleanup_refs_or_queue()). It is therefore essential
that drm_gem_object_release() be called only after the TTM BO has been
uninitialized: otherwise dma_resv_fini() will already have destroyed
the wound/wait mutex by the time ttm_bo_release() tries to take it.

Fix this by releasing the GEM object from nouveau_bo_del_ttm(), the
destroy callback that TTM invokes once it is completely done with the
BO, instead of releasing it before dropping the last TTM reference.
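
The ordering constraint can be illustrated with a small userspace
model. This is not nouveau code: toy_bo, release_gem() and
release_ttm() are invented names, and a plain pthread mutex stands in
for the reservation object's wound/wait mutex.

	/* Toy model of the bug; compile with -pthread. */
	#include <pthread.h>
	#include <stdio.h>

	struct toy_bo {
		pthread_mutex_t resv; /* stands in for the wound/wait mutex */
	};

	/* Models drm_gem_object_release(): dma_resv_fini() destroys the lock. */
	static void release_gem(struct toy_bo *bo)
	{
		pthread_mutex_destroy(&bo->resv);
	}

	/* Models ttm_bo_release(): cleanup still takes the reservation lock. */
	static void release_ttm(struct toy_bo *bo)
	{
		pthread_mutex_lock(&bo->resv);
		pthread_mutex_unlock(&bo->resv);
	}

	int main(void)
	{
		struct toy_bo bo;

		pthread_mutex_init(&bo.resv, NULL);

		/*
		 * The pre-patch order would be release_gem() followed by
		 * release_ttm(), i.e. locking an already-destroyed mutex.
		 */
		release_ttm(&bo); /* TTM finishes with the lock first... */
		release_gem(&bo); /* ...then it is safe to destroy it. */

		puts("teardown OK");
		return 0;
	}

Locking a destroyed mutex is undefined behaviour in both the toy model
and the kernel; the patch enforces the safe order by moving the GEM
release into the TTM destroy path.
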
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->bo.base.filp))
- DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+ /*
+ * If nouveau_bo_new() allocated this buffer, the GEM object was never
+ * initialized, so don't attempt to release it.
+ */
+ if (bo->base.dev)
+ drm_gem_object_release(&bo->base);
+
kfree(nvbo);
}

if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
- drm_gem_object_release(gem);
-
- /* reset filp so nouveau_bo_del_ttm() can test for it */
- gem->filp = NULL;
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);