drm/amdgpu: add amdgpu runpm usage trace for separate funcs
authorPrike Liang <Prike.Liang@amd.com>
Thu, 9 Nov 2023 03:37:18 +0000 (11:37 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 17 Nov 2023 14:30:51 +0000 (09:30 -0500)
Add a trace for each of the amdgpu runpm get/put functions. This will
help debug cases where a runpm usage reference is acquired but never
released. In the normal case every runpm usage reference taken by one
piece of functionality is paired with a matching release, so the usage
count should go from 1 back to 0; otherwise there is a leak in the
amdgpu runpm usage counting.

Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

index e7e87a3b2601eb130d9eaf13d8fab0c7b5e5c4cd..decbbe3d4f06e9aa67365b68e747feeb5bad6fc1 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_trace.h"
 
 /**
  * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -63,6 +64,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
                attach->peer2peer = false;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+       trace_amdgpu_runpm_reference_dumps(1, __func__);
        if (r < 0)
                goto out;
 
@@ -70,6 +72,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 
 out:
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+       trace_amdgpu_runpm_reference_dumps(0, __func__);
        return r;
 }
 
@@ -90,6 +93,7 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+       trace_amdgpu_runpm_reference_dumps(0, __func__);
 }
 
 /**
index dc230212746a290661008de9298ca04f916d44b6..70bff8cecfda7e20fd8fcb93ef1637894fcd3e02 100644 (file)
@@ -183,6 +183,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
+       trace_amdgpu_runpm_reference_dumps(1, __func__);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;
@@ -310,6 +311,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               trace_amdgpu_runpm_reference_dumps(0, __func__);
        } while (last_seq != seq);
 
        return true;
index 2fd1bfb35916fe63f2865d4bfad4f96ecd547627..f539b1d002343ea2a79af93595919a067da52742 100644 (file)
@@ -554,6 +554,21 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
                      __entry->value)
 );
 
+TRACE_EVENT(amdgpu_runpm_reference_dumps,
+           TP_PROTO(uint32_t index, const char *func),
+           TP_ARGS(index, func),
+           TP_STRUCT__entry(
+                            __field(uint32_t, index)
+                            __string(func, func)
+                            ),
+           TP_fast_assign(
+                          __entry->index = index;
+                          __assign_str(func, func);
+                          ),
+           TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
+                     __entry->index,
+                     __get_str(func))
+);
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif