This allows us to trace all VM ranges that should be valid inside a CS.
v2: dump mappings without a BO as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-and-tested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> (v1)
Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
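
The new walk is gated on the tracepoint actually being enabled: DEFINE_EVENT() generates both trace_amdgpu_vm_bo_cs() and a cheap trace_amdgpu_vm_bo_cs_enabled() check backed by a static key, so the interval-tree iteration costs nothing unless someone has turned the event on. A minimal sketch of that pattern (the event and function names below are illustrative only, not part of this patch):

/* Illustrative only -- reuses the existing amdgpu_vm_mapping event class
 * the same way this patch does; my_vm_walk_event is a made-up name. */
DEFINE_EVENT(amdgpu_vm_mapping, my_vm_walk_event,
            TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
            TP_ARGS(mapping)
);

static void my_vm_walk(struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *m;

        /* Generated static-key helper; false unless the event has been
         * enabled via tracefs or perf, so the walk is normally skipped. */
        if (!trace_my_vm_walk_event_enabled())
                return;

        for (m = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); m;
             m = amdgpu_vm_it_iter_next(m, 0, U64_MAX))
                trace_my_vm_walk_event(m);
}
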
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
 {
+       struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_ring *ring = p->ring;
        struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
        enum drm_sched_priority priority;
        amdgpu_job_free_resources(job);
 
        trace_amdgpu_cs_ioctl(job);
+       amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
 
            TP_ARGS(mapping)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_ptes,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint64_t flags),
 
        return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
 }
 
+/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission, as
+ * well as mappings without a backing BO.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+
+       if (!trace_amdgpu_vm_bo_cs_enabled())
+               return;
+
+       for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+            mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+               if (mapping->bo_va && mapping->bo_va->base.bo) {
+                       struct amdgpu_bo *bo;
+
+                       bo = mapping->bo_va->base.bo;
+                       if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+                               continue;
+               }
+
+               trace_amdgpu_vm_bo_cs(mapping);
+       }
+}
+
 /**
  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
  *
 
                                uint64_t saddr, uint64_t size);
 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,