	} while (fault->timestamp < tmp);
}
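+
+/**
+ * amdgpu_gmc_ras_early_init - register GMC RAS blocks with the RAS framework
+ * @adev: amdgpu device pointer
+ *
+ * The XGMI RAS block is only exposed when the GMC is not connected to the
+ * CPU over XGMI. Registering it during early init ensures that
+ * adev->gmc.xgmi.ras is populated before amdgpu_gmc_ras_late_init()
+ * dereferences it.
+ */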
+int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+{
+	if (!adev->gmc.xgmi.connected_to_cpu) {
+		adev->gmc.xgmi.ras = &xgmi_ras;
+		amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
+	}
+
+	return 0;
+}
+
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

-	if (!adev->gmc.xgmi.connected_to_cpu) {
-		adev->gmc.xgmi.ras = &xgmi_ras;
-		amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
-	}
-
	if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_late_init) {
		r = adev->gmc.xgmi.ras->ras_block.ras_late_init(adev, NULL);
		if (r)
			return r;
	}
			     uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
				     uint16_t pasid);
+int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
static int gmc_v10_0_early_init(void *handle)
{
+	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);

	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
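+
+	/* Register GMC RAS blocks (e.g. XGMI) before RAS late init uses them */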
+	r = amdgpu_gmc_ras_early_init(adev);
+	if (r)
+		return r;
+
	return 0;
}
static int gmc_v9_0_early_init(void *handle)
{
+	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */

	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
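+
+	/* Register GMC RAS blocks (e.g. XGMI) before RAS late init uses them */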
+	r = amdgpu_gmc_ras_early_init(adev);
+	if (r)
+		return r;
+
	return 0;
}