drm/nouveau: Fix ordering between TTM and GEM release
author: Thierry Reding <treding@nvidia.com>
Mon, 16 Sep 2019 14:19:25 +0000 (16:19 +0200)
committer: Ben Skeggs <bskeggs@redhat.com>
Tue, 17 Sep 2019 04:50:16 +0000 (14:50 +1000)
When the last reference to a TTM BO is dropped, ttm_bo_release() will
acquire the DMA reservation object's wound/wait mutex while trying to
clean up (ttm_bo_cleanup_refs_or_queue() via ttm_bo_release()). It is
therefore essential that drm_gem_object_release() be called after the
TTM BO has been uninitialized, otherwise drm_gem_object_release() has
already destroyed the wound/wait mutex (via dma_resv_fini()).

Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_gem.c

index e7803dca32c5f31d7531a4e46633bed220abecd8..f8015e0318d71fbd5d8c199a6cee40c15922e279 100644 (file)
@@ -136,10 +136,16 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       if (unlikely(nvbo->bo.base.filp))
-               DRM_ERROR("bo %p still attached to GEM object\n", bo);
        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+       /*
+        * If nouveau_bo_new() allocated this buffer, the GEM object was never
+        * initialized, so don't attempt to release it.
+        */
+       if (bo->base.dev)
+               drm_gem_object_release(&bo->base);
+
        kfree(nvbo);
 }
 
index 1bdffd7144567c4fa3a00d1e4107f51ffadd67ec..1324c19f4e5cfa4ea10a966ab4a5d1f08c434d82 100644 (file)
@@ -51,10 +51,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-       drm_gem_object_release(gem);
-
-       /* reset filp so nouveau_bo_del_ttm() can test for it */
-       gem->filp = NULL;
        ttm_bo_put(&nvbo->bo);
 
        pm_runtime_mark_last_busy(dev);