dma-buf: rename and cleanup dma_resv_get_excl v3
author: Christian König <christian.koenig@amd.com>
Mon, 10 May 2021 14:14:09 +0000 (16:14 +0200)
committer: Christian König <christian.koenig@amd.com>
Sun, 6 Jun 2021 09:17:58 +0000 (11:17 +0200)
When the comment needs to state explicitly that this
doesn't get a reference to the object then the function
is named rather badly.

Rename the function and use rcu_dereference_check(), this
way it can be used from both rcu as well as lock protected
critical sections.

v2: improve kerneldoc as suggested by Daniel
v3: use dma_resv_excl_fence as function name

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-4-christian.koenig@amd.com
15 files changed:
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-resv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/i915/gem/i915_gem_busy.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_sync.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
include/linux/dma-resv.h

index ee04fb4420156c88101e3ea7bc5e5bc8a9e94907..d419cf90ee73aa8a348128d10cff695c5d6dabdb 100644 (file)
@@ -234,7 +234,7 @@ retry:
                shared_count = fobj->shared_count;
        else
                shared_count = 0;
-       fence_excl = rcu_dereference(resv->fence_excl);
+       fence_excl = dma_resv_excl_fence(resv);
        if (read_seqcount_retry(&resv->seq, seq)) {
                rcu_read_unlock();
                goto retry;
@@ -1382,8 +1382,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
                                buf_obj->name ?: "");
 
                robj = buf_obj->resv;
-               fence = rcu_dereference_protected(robj->fence_excl,
-                                                 dma_resv_held(robj));
+               fence = dma_resv_excl_fence(robj);
                if (fence)
                        seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
                                   fence->ops->get_driver_name(fence),
index 6132ba631991607e66c007193abba8591f6352b2..ed7b4e8f002ffa07bff45fe6365fc72dc7586e79 100644 (file)
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
  */
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-       struct dma_fence *old_fence = dma_resv_get_excl(obj);
+       struct dma_fence *old_fence = dma_resv_excl_fence(obj);
        struct dma_resv_list *old;
        u32 i = 0;
 
@@ -380,7 +380,7 @@ retry:
        rcu_read_unlock();
 
        src_list = dma_resv_get_list(dst);
-       old = dma_resv_get_excl(dst);
+       old = dma_resv_excl_fence(dst);
 
        write_seqcount_begin(&dst->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
@@ -428,7 +428,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
                rcu_read_lock();
                seq = read_seqcount_begin(&obj->seq);
 
-               fence_excl = rcu_dereference(obj->fence_excl);
+               fence_excl = dma_resv_excl_fence(obj);
                if (fence_excl && !dma_fence_get_rcu(fence_excl))
                        goto unlock;
 
@@ -523,7 +523,7 @@ retry:
        rcu_read_lock();
        i = -1;
 
-       fence = rcu_dereference(obj->fence_excl);
+       fence = dma_resv_excl_fence(obj);
        if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                if (!dma_fence_get_rcu(fence))
                        goto unlock_retry;
@@ -645,7 +645,7 @@ retry:
        }
 
        if (!shared_count) {
-               struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+               struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
 
                if (fence_excl) {
                        ret = dma_resv_test_signaled_single(fence_excl);
index 73c76a3e2b12a0f89de760b5a999a8577f800ca2..7d5aaf584634808062a8305664c0ca24f8a38456 100644 (file)
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;
 
-       fence = dma_resv_get_excl(bo->tbo.base.resv);
+       fence = dma_resv_excl_fence(bo->tbo.base.resv);
        if (fence) {
                amdgpu_bo_fence(bo, fence, true);
                fence = NULL;
index 4e558632a5d2bec6eccaba5e176be0e7ace4c666..2bdc9df5c6b95caed44f3b4ac1f043b6e66c829a 100644 (file)
@@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                return -EINVAL;
 
        /* always sync to the exclusive fence */
-       f = dma_resv_get_excl(resv);
+       f = dma_resv_excl_fence(resv);
        r = amdgpu_sync_fence(sync, f);
 
        flist = dma_resv_get_list(resv);
index db69f19ab5bcad8790d4f35760e3d019dfff85e0..2237fe5204d05884daffef7b9afa37105a9f4afa 100644 (file)
@@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
                }
        }
 
-       fence = rcu_dereference(robj->fence_excl);
+       fence = dma_resv_excl_fence(robj);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
index 25235ef630c10829b1041077f71af241d37a8f70..088d375b3395457d4a0656c557acfbfd5453b553 100644 (file)
@@ -113,8 +113,7 @@ retry:
        seq = raw_read_seqcount(&obj->base.resv->seq);
 
        /* Translate the exclusive fence to the READ *and* WRITE engine */
-       args->busy =
-               busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+       args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
 
        /* Translate shared fences to READ set of engines */
        list = rcu_dereference(obj->base.resv->fence);
index 56df86e5f74003e031d08c824d145fb2e8d34b14..a5a2a922e3e8cf1896d174dddbb5ba52667ddc5e 100644 (file)
@@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 
        fobj = dma_resv_get_list(obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
-               fence = dma_resv_get_excl(obj->resv);
+               fence = dma_resv_excl_fence(obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
@@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
                }
        }
 
-       fence = rcu_dereference(robj->fence_excl);
+       fence = dma_resv_excl_fence(robj);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
index c3d20bc800227ae9393a77000416b4e0e949c1c7..520b1ea9d16c7fb5cf6f8748b9bddde2e9a88654 100644 (file)
@@ -951,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
-       struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+       struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
 
        nv10_bo_put_tile_region(dev, *old_tile, fence);
        *old_tile = new_tile;
index e5dcbf67de7eff81c9714355549f85635f3486bd..19c096de5bdcde3b03b1db74000c87dcb1cbd85d 100644 (file)
@@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
        }
 
        fobj = dma_resv_get_list(resv);
-       fence = dma_resv_get_excl(resv);
+       fence = dma_resv_excl_fence(resv);
 
        if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                struct nouveau_channel *prev = NULL;
index 652af7a134bd0bd9e92ece804430da2671c9caea..4066813174194d11f5497e8c8e6795b10a50810b 100644 (file)
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
                goto cleanup;
        }
-       work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+       work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
        radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(new_rbo);
 
index 5d3302945076bc7860bd526756812f43ba9892fc..c8a1711325de2937b38489bf9c1285107fef8ae7 100644 (file)
@@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
        int r = 0;
 
        /* always sync to the exclusive fence */
-       f = dma_resv_get_excl(resv);
+       f = dma_resv_excl_fence(resv);
        fence = f ? to_radeon_fence(f) : NULL;
        if (fence && fence->rdev == rdev)
                radeon_sync_fence(sync, fence);
index dfa9fdbe98da25fc2360769b583419405b7f962f..1f5b1a5c0a092e89109658e002e63f4b44f4f525 100644 (file)
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return -EINVAL;
        }
 
-       f = dma_resv_get_excl(bo->tbo.base.resv);
+       f = dma_resv_excl_fence(bo->tbo.base.resv);
        if (f) {
                r = radeon_fence_wait((struct radeon_fence *)f, false);
                if (r) {
index 4ed56520b81d7e6bea2acedf34854313c37e0d28..1752f8e523e7b680a97a8f5c30f695e92aa6d56a 100644 (file)
@@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 
        rcu_read_lock();
        fobj = rcu_dereference(resv->fence);
-       fence = rcu_dereference(resv->fence_excl);
+       fence = dma_resv_excl_fence(resv);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);
 
index 62ea920addc305a7235b48b700bebe8e14915323..7b45393ad98e923bafc462ea5222fa535781f8d9 100644 (file)
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
                if (bo->moving)
                        dma_fence_put(bo->moving);
                bo->moving = dma_fence_get
-                       (dma_resv_get_excl(bo->base.resv));
+                       (dma_resv_excl_fence(bo->base.resv));
        }
 
        return 0;
index f32a3d176513dcbfe43bd26d9d605998097d4a12..e3a7f740bb06863565010d3d665949fe0f94d685 100644 (file)
@@ -226,22 +226,20 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
+ * dma_resv_excl_fence - return the object's exclusive fence
  * @obj: the reservation object
  *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
+ * Returns the exclusive fence (if any). Caller must either hold the object's
+ * lock through dma_resv_lock(), or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
  *
  * RETURNS
  * The exclusive fence or NULL
  */
 static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
+dma_resv_excl_fence(struct dma_resv *obj)
 {
-       return rcu_dereference_protected(obj->fence_excl,
-                                        dma_resv_held(obj));
+       return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }
 
 /**