* amdgpu_vm_handle_fault - graceful handling of VM faults.
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
+ * @vmid: VMID, only used for GFX 9.4.3.
+ * @node_id: Node_id received in IH cookie. Only applicable for
+ * GFX 9.4.3.
* @addr: Address of the fault
* @write_fault: true if write fault, false if read fault
*
* Try to gracefully handle a VM fault. Return true if the fault was handled and
* shouldn't be reported any more.
*/
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 client_id, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr,
bool write_fault)
{
bool is_compute_context = false;
addr /= AMDGPU_GPU_PAGE_SIZE;
- if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id,
+ if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
node_id, addr, write_fault)) {
amdgpu_bo_unref(&root);
return true;
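/*
 * Illustration only, not kernel code: a minimal user-space sketch of the
 * calling contract documented above. fake_handle_fault() is a hypothetical
 * stand-in for amdgpu_vm_handle_fault(); it assumes 4 KiB GPU pages
 * (AMDGPU_GPU_PAGE_SIZE) and made-up pasid/vmid/node_id values such as an
 * IH ring entry might carry.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_GPU_PAGE_SIZE 4096ULL	/* assumption: mirrors AMDGPU_GPU_PAGE_SIZE */

static bool fake_handle_fault(uint32_t pasid, uint32_t vmid, uint32_t node_id,
			      uint64_t addr, bool write_fault)
{
	addr /= FAKE_GPU_PAGE_SIZE;	/* byte address -> GPU page number, as in the handler above */
	printf("pasid %u vmid %u node %u page 0x%llx %s fault\n",
	       pasid, vmid, node_id, (unsigned long long)addr,
	       write_fault ? "write" : "read");
	return true;			/* true: fault handled, don't report it again */
}

int main(void)
{
	if (fake_handle_fault(0x8001, 8, 2, 0x12345678ULL, true))
		printf("fault handled, no further reporting\n");
	return 0;
}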
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 client_id, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr,
bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
cam_index = entry->src_data[2] & 0x3ff;
- ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id,
+ ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
addr, write_fault);
WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
if (ret)
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id,
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
addr, write_fault))
return 1;
}
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev);
-static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id,
- uint32_t node_id)
+static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
+ uint32_t vmid)
{
- if ((node->interrupt_bitmap & (0x1U << node_id)) ||
- ((node_id % 4) == 0 &&
- (node->interrupt_bitmap >> 16) & (0x1U << client_id)))
- return true;
-
- return false;
+ return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
+ (node->compute_vmid_bitmap & (1 << vmid)) != 0;
}
static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
- uint32_t client_id, uint32_t node_id) {
+ uint32_t node_id, uint32_t vmid) {
struct kfd_dev *dev = adev->kfd.dev;
uint32_t i;
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)
- if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id))
+ if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
return dev->nodes[i];
return NULL;
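/*
 * Illustration only, not kernel code: a standalone user-space sketch of how
 * the (node_id, vmid) pair from the IH cookie selects a KFD node, assuming
 * hypothetical bitmap values. It follows the bit test in
 * kfd_irq_is_from_node() above: bit N of interrupt_bitmap stands for
 * node_id N, bit V of compute_vmid_bitmap for VMID V. struct fake_kfd_node
 * is an illustrative stand-in, not the kernel struct.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_kfd_node {
	uint32_t interrupt_bitmap;	/* stand-in for node->interrupt_bitmap */
	uint32_t compute_vmid_bitmap;	/* stand-in for node->compute_vmid_bitmap */
};

/* Same shape as kfd_irq_is_from_node(): both bits must be set. */
static bool fake_irq_is_from_node(const struct fake_kfd_node *node,
				  uint32_t node_id, uint32_t vmid)
{
	return (node->interrupt_bitmap & (1u << node_id)) &&
	       (node->compute_vmid_bitmap & (1u << vmid));
}

int main(void)
{
	/* Two made-up partitions: node 0 answers node_ids {0,1} with VMIDs 1-7,
	 * node 1 answers node_ids {2,3} with VMIDs 8-15.
	 */
	struct fake_kfd_node nodes[2] = {
		{ .interrupt_bitmap = 0x3, .compute_vmid_bitmap = 0x00fe },
		{ .interrupt_bitmap = 0xc, .compute_vmid_bitmap = 0xff00 },
	};
	uint32_t node_id = 2, vmid = 9;	/* as they might arrive in the IH entry */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		if (fake_irq_is_from_node(&nodes[i], node_id, vmid)) {
			printf("fault routed to KFD node %u\n", i);	/* prints node 1 */
			return 0;
		}
	}
	printf("no matching KFD node\n");
	return 0;
}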
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
- uint32_t client_id, uint32_t node_id,
+ uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
struct mm_struct *mm = NULL;
goto out;
}
- node = kfd_node_by_irq_ids(adev, node_id, client_id);
+ node = kfd_node_by_irq_ids(adev, node_id, vmid);
if (!node) {
- pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id,
- client_id);
+ pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
+ vmid);
r = -EFAULT;
goto out;
}
unsigned long addr, struct svm_range *parent,
struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
- uint32_t client_id, uint32_t node_id, uint64_t addr,
+ uint32_t vmid, uint32_t node_id, uint64_t addr,
bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,