drm/amdgpu: Read aquavanjaram XGMI register state
authorLijo Lazar <lijo.lazar@amd.com>
Sat, 7 Oct 2023 10:11:27 +0000 (15:41 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 29 Nov 2023 21:49:35 +0000 (16:49 -0500)
Add support to read the state of XGMI links in the aquavanjaram SOC.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c

index 0a4598480dd552fbe1fbf7b06a9c66460e57c79d..a00b8c6f0a942e8d822c8882d31a761a4b5ee028 100644 (file)
@@ -674,6 +674,15 @@ struct aqua_reg_list {
 
 #define DW_ADDR_INCR   4
 
+/*
+ * aqua_read_smn_ext - read one SMN register via extended addressing
+ * @adev: amdgpu device handle
+ * @regdata: output slot; receives both the encoded address and the value
+ * @smn_addr: SMN register address before per-instance encoding
+ * @i: instance index passed to the asic's encode_ext_smn_addressing()
+ *
+ * Stores the fully-encoded address in @regdata->addr (so the consumer of
+ * the register-state blob can tell which register was sampled) and reads
+ * the value with RREG32_PCIE_EXT.
+ */
+static void aqua_read_smn_ext(struct amdgpu_device *adev,
+                             struct amdgpu_smn_reg_data *regdata,
+                             uint64_t smn_addr, int i)
+{
+       regdata->addr =
+               smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
+       regdata->value = RREG32_PCIE_EXT(regdata->addr);
+}
+
 #define smnreg_0x1A340218      0x1A340218
 #define smnreg_0x1A3402E4      0x1A3402E4
 #define smnreg_0x1A340294      0x1A340294
@@ -755,6 +764,90 @@ static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
        return pcie_reg_state->common_header.structure_size;
 }
 
+/* XGMI SMN register addresses; macro names encode the register offset. */
+#define smnreg_0x11A00050      0x11A00050
+#define smnreg_0x11A00180      0x11A00180
+#define smnreg_0x11A00070      0x11A00070
+#define smnreg_0x11A00200      0x11A00200
+#define smnreg_0x11A0020C      0x11A0020C
+#define smnreg_0x11A00210      0x11A00210
+#define smnreg_0x11A00108      0x11A00108
+
+/* Fold the XGMI link index into the SMN address (shifted to bit 20).
+ * 'l' is parenthesized so expression arguments expand safely.
+ */
+#define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
+
+/* Total registers dumped per link: sum of num_regs in xgmi_reg_addrs
+ * below (1 + 16 + 4 + 1 + 1 + 1 + 1).
+ */
+#define NUM_XGMI_SMN_REGS 25
+
+/* Register ranges to sample per XGMI link: { start_addr, num_regs, incrx } */
+static struct aqua_reg_list xgmi_reg_addrs[] = {
+       { smnreg_0x11A00050, 1, 0 },
+       { smnreg_0x11A00180, 16, DW_ADDR_INCR },
+       { smnreg_0x11A00070, 4, DW_ADDR_INCR },
+       { smnreg_0x11A00200, 1, 0 },
+       { smnreg_0x11A0020C, 1, 0 },
+       { smnreg_0x11A00210, 1, 0 },
+       { smnreg_0x11A00108, 1, 0 },
+};
+
+/*
+ * aqua_vanjaram_read_xgmi_state - snapshot XGMI link registers into @buf
+ * @adev: amdgpu device handle
+ * @buf: caller buffer receiving an amdgpu_reg_state_xgmi_v1_0 blob
+ * @max_size: capacity of @buf in bytes
+ *
+ * For every AID instance and each of its XGMI links, dumps the
+ * NUM_XGMI_SMN_REGS registers described by xgmi_reg_addrs into a
+ * variable-length instance block inside @buf.
+ *
+ * Return: number of bytes written, -EINVAL on bad arguments, or
+ * -EOVERFLOW when @buf cannot hold all instances.
+ */
+static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
+                                            void *buf, size_t max_size)
+{
+       struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
+       uint32_t start_addr, incrx, num_regs, szbuf;
+       struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
+       struct amdgpu_smn_reg_data *reg_data;
+       const int max_xgmi_instances = 8;
+       int inst = 0, i, j, r, n;
+       const int xgmi_inst = 2;
+       void *p;
+
+       if (!buf || !max_size)
+               return -EINVAL;
+
+       xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;
+
+       /* Header plus one per-instance register block for the worst case of
+        * max_xgmi_instances (presumably AIDs * xgmi_inst links = 8 — verify
+        * against aid_mask population).
+        */
+       szbuf = sizeof(*xgmi_reg_state) +
+               amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
+                                   NUM_XGMI_SMN_REGS);
+       /* Buffer must hold the state of all XGMI instances at once */
+       if (max_size < szbuf)
+               return -EOVERFLOW;
+
+       p = &xgmi_reg_state->xgmi_state_regs[0];
+       for_each_inst(i, adev->aid_mask) {
+               for (j = 0; j < xgmi_inst; ++j) {
+                       xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
+                       xgmi_regs->inst_header.instance = inst++;
+
+                       xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
+                       xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;
+
+                       reg_data = xgmi_regs->smn_reg_values;
+
+                       for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
+                               start_addr = xgmi_reg_addrs[r].start_addr;
+                               incrx = xgmi_reg_addrs[r].incrx;
+                               num_regs = xgmi_reg_addrs[r].num_regs;
+
+                               /* Link index j is folded into the SMN address;
+                                * AID index i goes through extended addressing.
+                                */
+                               for (n = 0; n < num_regs; n++) {
+                                       aqua_read_smn_ext(
+                                               adev, reg_data,
+                                               XGMI_LINK_REG(start_addr, j) +
+                                                       n * incrx,
+                                               i);
+                                       ++reg_data;
+                               }
+                       }
+                       /* Next instance block starts right after this one */
+                       p = reg_data;
+               }
+       }
+
+       xgmi_reg_state->common_header.structure_size = szbuf;
+       xgmi_reg_state->common_header.format_revision = 1;
+       xgmi_reg_state->common_header.content_revision = 0;
+       xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
+       xgmi_reg_state->common_header.num_instances = max_xgmi_instances;
+
+       return xgmi_reg_state->common_header.structure_size;
+}
+
 ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
                                    enum amdgpu_reg_state reg_state, void *buf,
                                    size_t max_size)
@@ -765,6 +858,9 @@ ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
        case AMDGPU_REG_STATE_TYPE_PCIE:
                size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
                break;
+       case AMDGPU_REG_STATE_TYPE_XGMI:
+               size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
+               break;
        default:
                return -EINVAL;
        }