drm/amdkfd: Update SMI events for GFX9.4.3
authorMukul Joshi <mukul.joshi@amd.com>
Tue, 9 Aug 2022 18:56:53 +0000 (14:56 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 9 Jun 2023 13:46:31 +0000 (09:46 -0400)
On GFX 9.4.3, there can be multiple KFD nodes per device. As a result,
SMI events for SVM and for queue evict/restore should be raised for
each node independently.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

index e7e5abc32c84c8d9ca6007988522b89329f0b916..42e599912e52b5f898f1b7c2b8d5999728366795 100644 (file)
@@ -423,9 +423,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
        migrate.dst = migrate.src + npages;
        scratch = (dma_addr_t *)(migrate.dst + npages);
 
-       kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
+       kfd_smi_event_migration_start(node, p->lead_thread->pid,
                                      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-                                     0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc,
+                                     0, node->id, prange->prefetch_loc,
                                      prange->preferred_loc, trigger);
 
        r = migrate_vma_setup(&migrate);
@@ -456,9 +456,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
        svm_migrate_copy_done(adev, mfence);
        migrate_vma_finalize(&migrate);
 
-       kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
+       kfd_smi_event_migration_end(node, p->lead_thread->pid,
                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-                                   0, adev->kfd.dev->nodes[0]->id, trigger);
+                                   0, node->id, trigger);
 
        svm_range_dma_unmap(adev->dev, scratch, 0, npages);
        svm_range_free_dma_mappings(prange);
@@ -702,9 +702,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
        migrate.fault_page = fault_page;
        scratch = (dma_addr_t *)(migrate.dst + npages);
 
-       kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
+       kfd_smi_event_migration_start(node, p->lead_thread->pid,
                                      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-                                     adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc,
+                                     node->id, 0, prange->prefetch_loc,
                                      prange->preferred_loc, trigger);
 
        r = migrate_vma_setup(&migrate);
@@ -738,9 +738,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
        svm_migrate_copy_done(adev, mfence);
        migrate_vma_finalize(&migrate);
 
-       kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
+       kfd_smi_event_migration_end(node, p->lead_thread->pid,
                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-                                   adev->kfd.dev->nodes[0]->id, 0, trigger);
+                                   node->id, 0, trigger);
 
        svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 
index 666815b227a80d10ae50694a281ef0bb5a99ac31..a6ff57f11472573c7a0b99baf7f246a7185bb869 100644 (file)
@@ -1817,7 +1817,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
        for (i = 0; i < p->n_pdds; i++) {
                struct kfd_process_device *pdd = p->pdds[i];
 
-               kfd_smi_event_queue_eviction(pdd->dev->kfd, p->lead_thread->pid,
+               kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
                                             trigger);
 
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
@@ -1845,7 +1845,7 @@ fail:
                if (n_evicted == 0)
                        break;
 
-               kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid);
+               kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
 
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd))
@@ -1866,7 +1866,7 @@ int kfd_process_restore_queues(struct kfd_process *p)
        for (i = 0; i < p->n_pdds; i++) {
                struct kfd_process_device *pdd = p->pdds[i];
 
-               kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid);
+               kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
 
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
index b703da59e0677ac7606815d96e0e9e81b336af5a..d9953c2b2661445524c6444118f8c31822831f46 100644 (file)
@@ -250,58 +250,58 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
                          task_info.pid, task_info.task_name);
 }
 
-void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
                                    unsigned long address, bool write_fault,
                                    ktime_t ts)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
                          "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
-                         address, dev->nodes[0]->id, write_fault ? 'W' : 'R');
+                         address, node->id, write_fault ? 'W' : 'R');
 }
 
-void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
                                  unsigned long address, bool migration)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
                          "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
-                         pid, address, dev->nodes[0]->id, migration ? 'M' : 'U');
+                         pid, address, node->id, migration ? 'M' : 'U');
 }
 
-void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
                                   unsigned long start, unsigned long end,
                                   uint32_t from, uint32_t to,
                                   uint32_t prefetch_loc, uint32_t preferred_loc,
                                   uint32_t trigger)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
                          "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
                          ktime_get_boottime_ns(), pid, start, end - start,
                          from, to, prefetch_loc, preferred_loc, trigger);
 }
 
-void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
                                 unsigned long start, unsigned long end,
                                 uint32_t from, uint32_t to, uint32_t trigger)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
                          "%lld -%d @%lx(%lx) %x->%x %d\n",
                          ktime_get_boottime_ns(), pid, start, end - start,
                          from, to, trigger);
 }
 
-void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
                                  uint32_t trigger)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
                          "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
-                         dev->nodes[0]->id, trigger);
+                         node->id, trigger);
 }
 
-void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid)
+void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
                          "%lld -%d %x\n", ktime_get_boottime_ns(), pid,
-                         dev->nodes[0]->id);
+                         node->id);
 }
 
 void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
@@ -324,13 +324,13 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
        kfd_unref_process(p);
 }
 
-void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
                                  unsigned long address, unsigned long last,
                                  uint32_t trigger)
 {
-       kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU,
+       kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
                          "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
-                         pid, address, last - address + 1, dev->nodes[0]->id, trigger);
+                         pid, address, last - address + 1, node->id, trigger);
 }
 
 int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
index 59cd089f80d1853d4d430ad4594e7722a5cc6caa..fa95c2dfd587ffaf3b8cd536dca2e4f7615fb8d9 100644 (file)
@@ -29,24 +29,24 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid);
 void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
                                             uint64_t throttle_bitmask);
 void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset);
-void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
                                    unsigned long address, bool write_fault,
                                    ktime_t ts);
-void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
                                  unsigned long address, bool migration);
-void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
                             unsigned long start, unsigned long end,
                             uint32_t from, uint32_t to,
                             uint32_t prefetch_loc, uint32_t preferred_loc,
                             uint32_t trigger);
-void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
                             unsigned long start, unsigned long end,
                             uint32_t from, uint32_t to, uint32_t trigger);
-void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
                                  uint32_t trigger);
-void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid);
+void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
 void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
-void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
                                  unsigned long address, unsigned long last,
                                  uint32_t trigger);
 #endif
index 639831fbb6cafc82ba6820bae95c25b441e1d8df..0dafbbe954ca4a97d6fda7605b0a9fa6903d1f1c 100644 (file)
@@ -1274,7 +1274,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
                        return -EINVAL;
                }
 
-               kfd_smi_event_unmap_from_gpu(pdd->dev->kfd, p->lead_thread->pid,
+               kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
                                             start, last, trigger);
 
                r = svm_range_unmap_from_gpu(pdd->dev->adev,
@@ -2934,7 +2934,7 @@ retry_write_locked:
                 svms, prange->start, prange->last, best_loc,
                 prange->actual_loc);
 
-       kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr,
+       kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
                                       write_fault, timestamp);
 
        if (prange->actual_loc != best_loc) {
@@ -2972,7 +2972,7 @@ retry_write_locked:
                pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
                         r, svms, prange->start, prange->last);
 
-       kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr,
+       kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
                                     migration);
 
 out_unlock_range: