Replace the gfx RAS query_utcl2_poison_status callback with the
corresponding interface in the gfxhub functions.

v2: replace node id with xcc id.
    Get the node id from the IH entry for query_utcl2_poison_status.
Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
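For reference, a minimal sketch of what a gfxhub-side implementation could
look like, modeled on the removed gfx_v9_4_2 helper below but indexed by
xcc_id instead of hardcoding hub 0. The gfxhub_v1_2_* names here are
illustrative assumptions, not necessarily the upstream implementation:

static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
						  int xcc_id)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(xcc_id)];
	u32 status;

	/* FED (fatal error detected) flags a poisoned UTCL2 access */
	status = RREG32(hub->vm_l2_pro_fault_status);
	/* reset the page fault status so the next fault is reported */
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
}

Such a callback would then be wired into the hub's function table so the
amdkfd wrapper below can reach it through adev->gfxhub.funcs:

const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	/* ... existing callbacks ... */
	.query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};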
return 0;
}
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int xcc_id)
{
- if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
- return adev->gfx.ras->query_utcl2_poison_status(adev);
+ if (adev->gfxhub.funcs->query_utcl2_poison_status)
+ return adev->gfxhub.funcs->query_utcl2_poison_status(adev, xcc_id);
else
return false;
}
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int xcc_id);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
mutex_unlock(&adev->grbm_idx_mutex);
}
-static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
-{
- u32 status = 0;
- struct amdgpu_vmhub *hub;
-
- hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
- status = RREG32(hub->vm_l2_pro_fault_status);
- /* reset page fault status */
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
- return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
-}
struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
.hw_ops = &gfx_v9_4_2_ras_ops,
},
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
- .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status,
};
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
+ int xcc_id = 0;
struct kfd_hsa_memory_exception_data exception_data;
- if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
- amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
+ xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+ node_id);
+ if (xcc_id < 0)
+ xcc_id = 0;
+ }
+
+ if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type &&
+ amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) {
event_interrupt_poison_consumption(dev, pasid, client_id);
return;
}
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
+ int xcc_id = 0;
struct kfd_hsa_memory_exception_data exception_data;
- if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
- amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
+ xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+ node_id);
+ if (xcc_id < 0)
+ xcc_id = 0;
+ }
+
+ if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type &&
+ amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
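
The ih_node_to_logical_xcc hook used above translates the node id carried in
the IH ring entry into a logical XCC instance. Its exact implementation is
ASIC-specific; as a hedged sketch, assuming each XCC occupies two consecutive
IH node ids and that adev->gfx.xcc_mask tracks the populated instances, it
might look like:

static int gfx_v9_4_3_ih_node_to_logical_xcc(struct amdgpu_device *adev,
					     int ih_node)
{
	int xcc;

	/* count the populated XCCs at or below this IH node's instance */
	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (xcc == 0)
		return -EINVAL;	/* no mapping found */

	return xcc - 1;
}

A negative return here feeds the "if (xcc_id < 0) xcc_id = 0;" fallback in
the interrupt handlers above, so an unmapped node id degrades to querying
the first XCC rather than an out-of-range instance.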