drm/msm: Protect ring->submits with its own lock
author Rob Clark <robdclark@chromium.org>
Fri, 23 Oct 2020 16:51:16 +0000 (09:51 -0700)
committer Rob Clark <robdclark@chromium.org>
Thu, 5 Nov 2020 00:00:56 +0000 (16:00 -0800)
One less place to rely on dev->struct_mutex.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Jordan Crouse <jcrouse@codeaurora.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/msm/msm_ringbuffer.h
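
For context, a minimal sketch of the locking pattern this patch introduces. The example_* names below are invented for illustration and are not part of the msm driver: each ring's list of in-flight submits is guarded by a dedicated spinlock, so adding and removing entries no longer relies on dev->struct_mutex.

	/*
	 * Illustrative sketch only; all example_* names are made up
	 * for this note and are not msm driver API.
	 */
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_ring {
		struct list_head submits;	/* in-flight submits, oldest first */
		spinlock_t submit_lock;		/* protects ->submits */
		uint32_t completed_seqno;	/* last seqno the GPU finished */
	};

	struct example_submit {
		struct list_head node;		/* link in example_ring::submits */
		uint32_t seqno;			/* position in the ring's ordering */
	};

	static void example_ring_init(struct example_ring *ring)
	{
		INIT_LIST_HEAD(&ring->submits);
		spin_lock_init(&ring->submit_lock);
		ring->completed_seqno = 0;
	}

	static void example_submit_queue(struct example_ring *ring,
					 struct example_submit *submit)
	{
		spin_lock(&ring->submit_lock);
		list_add_tail(&submit->node, &ring->submits);
		spin_unlock(&ring->submit_lock);
	}

	static void example_submit_remove(struct example_ring *ring,
					  struct example_submit *submit)
	{
		spin_lock(&ring->submit_lock);
		list_del(&submit->node);
		spin_unlock(&ring->submit_lock);
	}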

diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 044e9bee70a25983a2257eed1e4186d6453366a8..24ce4c65429d4514aa1697de5cf1f017de9c1717 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -65,7 +65,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
        unsigned i;
 
        dma_fence_put(submit->fence);
+       spin_lock(&submit->ring->submit_lock);
        list_del(&submit->node);
+       spin_unlock(&submit->ring->submit_lock);
        put_pid(submit->pid);
        msm_submitqueue_put(submit->queue);
 
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 93965f2119db8b2cc5cc7c29ddf7be96b5dfb773..6e6b170b9e5d1a5b427187a94101c52663d5c99c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -267,6 +267,7 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 {
        struct msm_gem_submit *submit;
 
+       spin_lock(&ring->submit_lock);
        list_for_each_entry(submit, &ring->submits, node) {
                if (submit->seqno > fence)
                        break;
@@ -274,6 +275,7 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                msm_update_fence(submit->ring->fctx,
                        submit->fence->seqno);
        }
+       spin_unlock(&ring->submit_lock);
 }
 
 #ifdef CONFIG_DEV_COREDUMP
@@ -429,11 +431,14 @@ find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 {
        struct msm_gem_submit *submit;
 
-       WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
-
-       list_for_each_entry(submit, &ring->submits, node)
-               if (submit->seqno == fence)
+       spin_lock(&ring->submit_lock);
+       list_for_each_entry(submit, &ring->submits, node) {
+               if (submit->seqno == fence) {
+                       spin_unlock(&ring->submit_lock);
                        return submit;
+               }
+       }
+       spin_unlock(&ring->submit_lock);
 
        return NULL;
 }
@@ -530,8 +535,10 @@ static void recover_worker(struct kthread_work *work)
                for (i = 0; i < gpu->nr_rings; i++) {
                        struct msm_ringbuffer *ring = gpu->rb[i];
 
+                       spin_lock(&ring->submit_lock);
                        list_for_each_entry(submit, &ring->submits, node)
                                gpu->funcs->submit(gpu, submit);
+                       spin_unlock(&ring->submit_lock);
                }
        }
 
@@ -717,7 +724,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 static void retire_submits(struct msm_gpu *gpu)
 {
        struct drm_device *dev = gpu->dev;
-       struct msm_gem_submit *submit, *tmp;
        int i;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -726,9 +732,24 @@ static void retire_submits(struct msm_gpu *gpu)
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];
 
-               list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
-                       if (dma_fence_is_signaled(submit->fence))
+               while (true) {
+                       struct msm_gem_submit *submit = NULL;
+
+                       spin_lock(&ring->submit_lock);
+                       submit = list_first_entry_or_null(&ring->submits,
+                                       struct msm_gem_submit, node);
+                       spin_unlock(&ring->submit_lock);
+
+                       /*
+                        * If no submit, we are done.  If submit->fence hasn't
+                        * been signalled, then later submits are not signalled
+                        * either, so we are also done.
+                        */
+                       if (submit && dma_fence_is_signaled(submit->fence)) {
                                retire_submit(gpu, ring, submit);
+                       } else {
+                               break;
+                       }
                }
        }
 }
@@ -770,7 +791,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        submit->seqno = ++ring->seqno;
 
+       spin_lock(&ring->submit_lock);
        list_add_tail(&submit->node, &ring->submits);
+       spin_unlock(&ring->submit_lock);
 
        msm_rd_dump_submit(priv->rd, submit, NULL);
 
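
In the retire_submits() rework above, the new lock is deliberately not held across retire_submit(): the loop takes the spinlock only long enough to peek at the oldest entry, drops it, and then retires that entry if its fence has signaled. A sketch of that shape, reusing the example_* types from the first sketch; example_submit_done() and example_retire_done() are invented stand-ins, not driver functions (and this sketch needs <linux/slab.h> for kfree()).

	/*
	 * Stand-in for the driver's dma_fence_is_signaled() check: a
	 * submit is done once the ring has completed its sequence
	 * number.  (The real driver tracks this via dma_fence, not a
	 * raw counter.)
	 */
	static bool example_submit_done(struct example_ring *ring,
					struct example_submit *submit)
	{
		return submit->seqno <= ring->completed_seqno;
	}

	static void example_retire_done(struct example_ring *ring)
	{
		while (true) {
			struct example_submit *submit;

			/* Hold the lock only to peek at the head. */
			spin_lock(&ring->submit_lock);
			submit = list_first_entry_or_null(&ring->submits,
					struct example_submit, node);
			spin_unlock(&ring->submit_lock);

			/*
			 * Submits complete in order: if the oldest one
			 * has not signaled yet, none of the newer ones
			 * have either.
			 */
			if (!submit || !example_submit_done(ring, submit))
				break;

			/*
			 * Cleanup runs with the lock dropped; the entry
			 * unlinks itself under the same lock, mirroring
			 * how msm_gem_submit_free() above wraps
			 * list_del() in submit_lock.
			 */
			example_submit_remove(ring, submit);
			kfree(submit);
		}
	}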
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1b6958e908dca7fcfa4023d3a2353c90169ffe3c..4d2a2a4abef8b0308aceaf55775bc16adce86a69 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -46,6 +46,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
        ring->memptrs_iova = memptrs_iova;
 
        INIT_LIST_HEAD(&ring->submits);
+       spin_lock_init(&ring->submit_lock);
        spin_lock_init(&ring->preempt_lock);
 
        snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 4956d1bc5d0e12c18d53dbe67c3098c1bab63035..fe55d4a1aa165af5ed648f611795ce4a95cceaa5 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -39,7 +39,13 @@ struct msm_ringbuffer {
        int id;
        struct drm_gem_object *bo;
        uint32_t *start, *end, *cur, *next;
+
+       /*
+        * List of in-flight submits on this ring.  Protected by submit_lock.
+        */
        struct list_head submits;
+       spinlock_t submit_lock;
+
        uint64_t iova;
        uint32_t seqno;
        uint32_t hangcheck_fence;
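
Finally, the find_submit() change earlier shows the lookup side of the pattern: the walk happens entirely under the spinlock, and the lock is released on both the found and not-found paths. One caveat the sketch below inherits from the driver: once the lock is dropped, the caller needs some other guarantee that the returned entry stays valid; in this patch, retirement still runs under dev->struct_mutex (note the WARN_ON in retire_submits()), which the recovery path also holds. Again, example_* names are illustrative only.

	/*
	 * Illustrative lookup mirroring find_submit(): unlock on every
	 * exit path.  Lifetime of the returned entry must be guaranteed
	 * by the caller, e.g. by holding a higher-level lock that keeps
	 * retirement from running concurrently.
	 */
	static struct example_submit *
	example_find_submit(struct example_ring *ring, uint32_t seqno)
	{
		struct example_submit *submit;

		spin_lock(&ring->submit_lock);
		list_for_each_entry(submit, &ring->submits, node) {
			if (submit->seqno == seqno) {
				spin_unlock(&ring->submit_lock);
				return submit;
			}
		}
		spin_unlock(&ring->submit_lock);

		return NULL;
	}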