	return r;
}
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
			adev->gfx.ras_if)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry);
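
Taken together, the .c and .h hunks above give the shape of the whole change: every per-IP ras_fini picks up a struct ras_common_if * parameter so that its signature matches ras_late_init. Below is a minimal userspace sketch of the resulting calling convention; the demo_* types and names are illustrative stand-ins, not the kernel's, and the fallback to a cached interface is an assumption suggested by the adev->gfx.ras_if guard above.

#include <stdio.h>

/* Stand-ins for the kernel types; fields reduced to what the sketch needs. */
struct ras_common_if { const char *name; };
struct demo_adev { struct ras_common_if *gfx_ras_if; };

/* New-style fini: the caller may hand over the block interface, or pass
 * NULL and let the callee fall back to the copy cached in the device. */
static void demo_gfx_ras_fini(struct demo_adev *adev,
			      struct ras_common_if *ras_block)
{
	struct ras_common_if *ras_if = ras_block ? ras_block : adev->gfx_ras_if;

	if (ras_if)
		printf("tearing down RAS state for %s\n", ras_if->name);
}

int main(void)
{
	struct ras_common_if gfx_if = { "GFX" };
	struct demo_adev adev = { &gfx_if };

	demo_gfx_ras_fini(&adev, NULL);     /* caller without the interface */
	demo_gfx_ras_fini(&adev, &gfx_if);  /* caller passing the block explicitly */
	return 0;
}

Either call form reaches the same teardown path, which is what lets the call sites below pass NULL during this transitional series.
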
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini)
-		adev->umc.ras->ras_block.ras_fini(adev);
+		adev->umc.ras->ras_block.ras_fini(adev, NULL);
	if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini)
-		adev->mmhub.ras->ras_block.ras_fini(adev);
+		adev->mmhub.ras->ras_block.ras_fini(adev, NULL);
	if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini)
-		adev->gmc.xgmi.ras->ras_block.ras_fini(adev);
+		adev->gmc.xgmi.ras->ras_block.ras_fini(adev, NULL);
	if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_fini)
-		adev->hdp.ras->ras_block.ras_fini(adev);
+		adev->hdp.ras->ras_block.ras_fini(adev, NULL);
}
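
amdgpu_gmc_ras_fini is the dispatcher side of the change: it checks both the sub-block object and its callback before the indirect call, and passes NULL for the new argument, which is safe at this point in the series because each callee still locates its own ras_if through adev. A compact, self-contained model of that guard-then-dispatch pattern (all demo_* names are hypothetical):

#include <stdio.h>

struct ras_common_if;
struct demo_adev;

struct demo_ras_block {
	void (*ras_fini)(struct demo_adev *adev, struct ras_common_if *ras_block);
};

struct demo_ip {
	struct demo_ras_block *ras;	/* NULL when the IP has no RAS support */
};

struct demo_adev {
	struct demo_ip umc;
	struct demo_ip mmhub;
};

static void demo_umc_fini(struct demo_adev *adev, struct ras_common_if *ras_block)
{
	(void)adev;
	(void)ras_block;	/* unused for now, as in the patch */
	printf("umc ras fini\n");
}

int main(void)
{
	struct demo_ras_block umc_ras = { .ras_fini = demo_umc_fini };
	struct demo_adev adev = { .umc = { &umc_ras }, .mmhub = { NULL } };

	/* Same two-level guard as the hunk above. */
	if (adev.umc.ras && adev.umc.ras->ras_fini)
		adev.umc.ras->ras_fini(&adev, NULL);
	if (adev.mmhub.ras && adev.mmhub.ras->ras_fini)	/* skipped: no RAS block */
		adev.mmhub.ras->ras_fini(&adev, NULL);
	return 0;
}
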
#include "amdgpu.h"
#include "amdgpu_ras.h"
-void amdgpu_hdp_ras_fini(struct amdgpu_device *adev)
+void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) &&
			adev->hdp.ras_if)
};
int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_hdp_ras_fini(struct amdgpu_device *adev);
+void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif /* __AMDGPU_HDP_H__ */
#include "amdgpu.h"
#include "amdgpu_ras.h"
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
			adev->mmhub.ras_if)
	struct amdgpu_mmhub_ras *ras;
};
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif
	return r;
}
-void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
			adev->nbio.ras_if)
};
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif
	int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj,
			       enum amdgpu_ras_block block, uint32_t sub_block_index);
	int (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-	void (*ras_fini)(struct amdgpu_device *adev);
+	void (*ras_fini)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
	ras_ih_cb ras_cb;
	const struct amdgpu_ras_block_hw_ops *hw_ops;
};
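
This struct hunk is the one the rest of the series hangs off: with ras_fini now taking the same (adev, ras_common_if *) pair as ras_late_init, generic code can drive both ends of a block's RAS lifecycle from one table. A sketch of that symmetry, under assumed and heavily simplified types:

#include <stdio.h>

struct demo_adev;
struct ras_common_if { const char *name; };

struct demo_block_obj {
	struct ras_common_if *ras_comm;
	int  (*ras_late_init)(struct demo_adev *adev, struct ras_common_if *ras_block);
	void (*ras_fini)(struct demo_adev *adev, struct ras_common_if *ras_block);
};

static int demo_late_init(struct demo_adev *adev, struct ras_common_if *rb)
{
	(void)adev;
	printf("late_init %s\n", rb->name);
	return 0;
}

static void demo_fini(struct demo_adev *adev, struct ras_common_if *rb)
{
	(void)adev;
	printf("fini %s\n", rb->name);
}

int main(void)
{
	struct ras_common_if gfx = { "gfx" }, umc = { "umc" };
	struct demo_block_obj blocks[] = {
		{ &gfx, demo_late_init, demo_fini },
		{ &umc, demo_late_init, demo_fini },
	};
	int n = (int)(sizeof(blocks) / sizeof(blocks[0]));
	int i;

	/* One loop brings every block up ... */
	for (i = 0; i < n; i++)
		if (blocks[i].ras_late_init)
			blocks[i].ras_late_init(NULL, blocks[i].ras_comm);
	/* ... and its mirror image tears them down in reverse order. */
	for (i = n - 1; i >= 0; i--)
		if (blocks[i].ras_fini)
			blocks[i].ras_fini(NULL, blocks[i].ras_comm);
	return 0;
}
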
	return r;
}
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
			adev->sdma.ras_if)
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
		struct ras_common_if *ras_block);
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry);
	return r;
}
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->umc.ras_if)
};
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
		void *ras_error_status,
		bool reset);
	return amdgpu_ras_block_late_init(adev, ras_block);
}
-static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if)
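
Note that amdgpu_xgmi_ras_fini stays static: unlike the gfx/hdp/sdma helpers it is not exported through a header, so it is reachable only through the block object it is registered in. Reduced to a model (demo_* names hypothetical):

#include <stdio.h>

struct ras_common_if;
struct demo_adev;

struct demo_ops {
	void (*ras_fini)(struct demo_adev *adev, struct ras_common_if *ras_block);
};

/* static: the ops table, not the symbol name, is the public surface */
static void demo_xgmi_ras_fini(struct demo_adev *adev,
			       struct ras_common_if *ras_block)
{
	(void)adev;
	(void)ras_block;
	printf("xgmi ras fini\n");
}

static const struct demo_ops demo_xgmi_ops = { .ras_fini = demo_xgmi_ras_fini };

int main(void)
{
	demo_xgmi_ops.ras_fini(NULL, NULL);
	return 0;
}
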
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini)
-		adev->gfx.ras->ras_block.ras_fini(adev);
+		adev->gfx.ras->ras_block.ras_fini(adev, NULL);
	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
ras_error_status);
}
-static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
+static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
}
ras_error_status);
}
-static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
+static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
}
ras_error_status);
}
-static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
+static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
}
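
The three MCA hunks follow one template: a shared helper does the actual work, and each instance (mp0, mp1, mpio) gets a thin wrapper that forwards its own state while accepting, and for now ignoring, the new parameter. In model form (demo_* names hypothetical):

#include <stdio.h>

struct ras_common_if;

struct demo_mca_instance { const char *label; };

struct demo_adev {
	struct demo_mca_instance mp0, mp1, mpio;
};

/* The shared helper: all per-instance teardown funnels through here. */
static void demo_mca_ras_fini(struct demo_adev *adev, struct demo_mca_instance *inst)
{
	(void)adev;
	printf("mca fini: %s\n", inst->label);
}

/* Thin wrappers with the common callback signature. */
static void demo_mp0_ras_fini(struct demo_adev *adev, struct ras_common_if *ras_block)
{
	(void)ras_block;	/* accepted for the shared signature, unused here */
	demo_mca_ras_fini(adev, &adev->mp0);
}

static void demo_mp1_ras_fini(struct demo_adev *adev, struct ras_common_if *ras_block)
{
	(void)ras_block;
	demo_mca_ras_fini(adev, &adev->mp1);
}

int main(void)
{
	struct demo_adev adev = { { "mp0" }, { "mp1" }, { "mpio" } };

	demo_mp0_ras_fini(&adev, NULL);
	demo_mp1_ras_fini(&adev, NULL);
	return 0;
}
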
	if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
	    adev->sdma.ras->ras_block.ras_fini)
-		adev->sdma.ras->ras_block.ras_fini(adev);
+		adev->sdma.ras->ras_block.ras_fini(adev, NULL);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
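
One detail worth noting at this call site: unlike the gfx and nbio callers, the SDMA path also requires ras_block.hw_ops to be populated before it will invoke ras_fini, so a RAS block object registered without hardware ops is skipped entirely here.
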
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_fini)
-		adev->nbio.ras->ras_block.ras_fini(adev);
+		adev->nbio.ras->ras_block.ras_fini(adev, NULL);
	if (adev->df.funcs &&
	    adev->df.funcs->sw_fini)