true);
ret = unreserve_bo_and_vms(&ctx, false, false);
- /* Only apply no TLB flush on Aldebaran to
- * workaround regressions on other Asics.
- */
- if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
- *table_freed = true;
-
goto out;
out_unreserve:
return ret;
}
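+/*
+ * Heavy-weight TLB flush after unmap is only issued on GC 9.4.2
+ * (Aldebaran), or on GC 9.4.1 (Arcturus) when the SDMA firmware is
+ * version 18 or newer.
+ */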
+static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) {
+ return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
+ dev->adev->sdma.instance[0].fw_version >= 18);
+}
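+/*
+ * Minimal sketch of the intended unmap-path use of the helper, assuming
+ * the existing kfd_flush_tlb() helper and the TLB_FLUSH_HEAVYWEIGHT flush
+ * type; "pdd" stands in for the peer's kfd_process_device. The real call
+ * site is in kfd_ioctl_unmap_memory_from_gpu further below.
+ *
+ *	if (kfd_flush_tlb_after_unmap(dev)) {
+ *		err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
+ *				(struct kgd_mem *) mem, true);
+ *		if (!err)
+ *			kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
+ *	}
+ */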
+
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
struct kfd_process *p, void *data)
{
	}
/* Flush TLBs after waiting for the page table updates to complete */
- if (table_freed) {
+ if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
for (i = 0; i < args->n_devices; i++) {
peer = kfd_device_by_id(devices_arr[i]);
if (WARN_ON_ONCE(!peer))
}
mutex_unlock(&p->mutex);
- if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) {
+ if (kfd_flush_tlb_after_unmap(dev)) {
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
(struct kgd_mem *) mem, true);
if (err) {