drm/amd/pm: Enable performance determinism on aldebaran
authorLijo Lazar <lijo.lazar@amd.com>
Fri, 5 Mar 2021 21:02:49 +0000 (16:02 -0500)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 24 Mar 2021 02:58:16 +0000 (22:58 -0400)
Performance Determinism is a new mode in Aldebaran where PMFW tries to
maintain sustained performance level. It can be enabled on a per-die
basis on Aldebaran. To guarantee that it remains within the power cap,
a max GFX frequency needs to be specified in this mode. A new
power_dpm_force_performance_level, "perf_determinism", is defined to enable
this mode in amdgpu. The max frequency (in MHz) can be specified through
pp_dpm_sclk. The mode will be disabled once any other performance level
is chosen.

Ex: To enable perf determinism at a 900 MHz max GFX clock

echo perf_determinism > /sys/bus/pci/devices/.../power_dpm_force_performance_level
echo max 900 > /sys/bus/pci/devices/.../pp_dpm_sclk

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c

index b26a6f4e5a745320dbf18f5d50acc443dbdcb190..7d5b71e6501998885522dc8fcdb3676056c662e0 100644 (file)
@@ -48,6 +48,7 @@ enum amd_dpm_forced_level {
        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
        AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
+       AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM = 0x200,
 };
 
 enum amd_pm_state_type {
index f2a64bd47f426a8fb965de3d2c32deb6c046f3cf..3cc4e6f556c3e71f63e9afada9a5bed3f9b12aeb 100644 (file)
@@ -292,6 +292,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+                       (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
                        "unknown");
 }
 
@@ -328,6 +329,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
        } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+       } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+               level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
        }  else {
                return -EINVAL;
        }
@@ -1039,6 +1042,83 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
  */
 #define AMDGPU_MASK_BUF_MAX    (32 * 13)
 
+/*
+ * amdgpu_read_clk - parse a "min <MHz>"/"max <MHz>" clock request string.
+ * @buf:   sysfs write buffer (not guaranteed NUL-terminated)
+ * @count: number of valid bytes in @buf
+ * @min:   out: parsed "min" value, left 0 if not specified
+ * @max:   out: parsed "max" value, left 0 if not specified
+ *
+ * Accepts up to two space/newline-separated "tag value" pairs, where tag
+ * is "min" or "max".  Returns 0 when at least one non-zero min or max was
+ * parsed, -EINVAL otherwise.  NOTE(review): a value of 0 is
+ * indistinguishable from "not specified" by the final check below.
+ */
+static int amdgpu_read_clk(const char *buf,
+               size_t count,
+               uint32_t *min,
+               uint32_t *max)
+{
+       int ret;
+       char *tmp;
+       char *token = NULL;
+       char *tag;
+       char *value;
+       char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+       const char delimiter[3] = {' ', '\n', '\0'};
+       size_t bytes;
+       int i = 0;
+
+       /*
+        * Copy into a bounded, NUL-terminated scratch buffer: strsep()
+        * mutates its input, and @buf may not be NUL-terminated.
+        * (min() here is the kernel macro, not the @min parameter.)
+        */
+       bytes = min(count, sizeof(buf_cpy) - 1);
+       memcpy(buf_cpy, buf, bytes);
+       buf_cpy[bytes] = '\0';
+       tmp = buf_cpy;
+
+       *min = *max = 0;
+       while (i < 2) {
+               ret = -EINVAL;
+               /* Expect a tag token ("min" or "max")... */
+               token = strsep(&tmp, delimiter);
+               if (!token || !*token)
+                       break;
+               tag = token;
+
+               /* ...followed by its numeric value. */
+               token = strsep(&tmp, delimiter);
+               if (!token || !*token)
+                       break;
+               value = token;
+
+               /*
+                * NOTE(review): strncmp() with strlen("min") is a prefix
+                * match, so e.g. "minimum" is accepted as "min" -- confirm
+                * this is intended.
+                */
+               if (!strncmp(tag, "min", strlen("min")))
+                       ret = kstrtou32(value, 0, min);
+               else if (!strncmp(tag, "max", strlen("max")))
+                       ret = kstrtou32(value, 0, max);
+
+               if (ret)
+                       break;
+               ++i;
+       }
+
+       /* should get a non-zero value for min or max */
+       if (!*min && !*max)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * amdgpu_set_clk_minmax - forward a soft min/max frequency range to the SMU.
+ * @adev:     amdgpu device
+ * @clk_type: SMU clock domain (SMU_GFXCLK etc.)
+ * @min:      requested minimum frequency in MHz (0 = unchanged/unset)
+ * @max:      requested maximum frequency in MHz (0 = unchanged/unset)
+ *
+ * Only supported on software-SMU parts and not under SR-IOV.  Returns 0 on
+ * success, a pm_runtime error, or -EINVAL if the SMU rejects the range.
+ */
+static int amdgpu_set_clk_minmax(struct amdgpu_device *adev,
+               uint32_t clk_type,
+               uint32_t min,
+               uint32_t max)
+{
+       int ret;
+
+       if (!is_support_sw_smu(adev) || amdgpu_sriov_vf(adev))
+               return -EINVAL;
+
+       /*
+        * pm_runtime_get_sync() raises the usage count even on failure,
+        * so the error path must still drop the reference.
+        */
+       ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+       if (ret < 0) {
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return ret;
+       }
+
+       ret = smu_set_soft_freq_range(&adev->smu, clk_type, min, max);
+
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+       /* Normalize any SMU error to -EINVAL for the sysfs caller. */
+       if (ret)
+               return -EINVAL;
+
+       return 0;
+}
+
 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 {
        int ret;
@@ -1077,10 +1157,18 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        uint32_t mask = 0;
+       uint32_t min;
+       uint32_t max;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
+       ret = amdgpu_read_clk(buf, count, &min, &max);
+       if (!ret) {
+               ret = amdgpu_set_clk_minmax(adev, SMU_GFXCLK, min, max);
+               return ret ? ret:count;
+       }
+
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
index 9f2482870353e678cec303e60a0d820ff1ded202..13d7fcdf76a0a151096d38f343f30547b9cba48c 100644 (file)
@@ -1663,7 +1663,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
                smu_dpm_ctx->dpm_level = level;
        }
 
-       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+               smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
@@ -1751,7 +1752,8 @@ int smu_switch_power_profile(void *handle,
                workload = smu->workload_setting[index];
        }
 
-       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+               smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
                smu_bump_power_profile_mode(smu, &workload, 0);
 
        mutex_unlock(&smu->mutex);
index 2427681fab8a18f16bd9e4f781d7c1617c0522e2..6295238586601093a27dad981ae34b33eb7e41a6 100644 (file)
@@ -1078,7 +1078,6 @@ static int aldebaran_get_power_limit(struct smu_context *smu)
                        dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
                        return -EINVAL;
                }
-
                power_limit = pptable->PptLimit;
        }
 
@@ -1103,7 +1102,19 @@ static int aldebaran_system_features_control(struct  smu_context *smu, bool enab
 static int aldebaran_set_performance_level(struct smu_context *smu,
                                           enum amd_dpm_forced_level level)
 {
+       struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+
+       /* Disable determinism if switching to another mode */
+       if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+                       && (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+               smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+
+
        switch (level) {
+
+       case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+               return 0;
+
        case AMD_DPM_FORCED_LEVEL_HIGH:
        case AMD_DPM_FORCED_LEVEL_LOW:
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1117,6 +1128,50 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
        return smu_v13_0_set_performance_level(smu, level);
 }
 
+/*
+ * aldebaran_set_soft_freq_limited_range - apply a determinism GFX clock cap.
+ * @smu:      SMU context
+ * @clk_type: must be SMU_GFXCLK or SMU_SCLK
+ * @min:      unused here; the default table minimum is restored instead
+ * @max:      requested determinism frequency cap in MHz
+ *
+ * Only honoured while in AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM mode: the
+ * default GFX min/max range is restored, then the PMFW is asked to hold
+ * sustained performance at @max.  Returns 0 on success, -EINVAL on a bad
+ * clock domain, wrong dpm level, or out-of-range @max.
+ */
+static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
+                                         enum smu_clk_type clk_type,
+                                         uint32_t min,
+                                         uint32_t max)
+{
+       struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+       struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t min_clk;
+       uint32_t max_clk;
+       int ret = 0;
+
+       if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+               return -EINVAL;
+
+       /* Soft limits are only accepted while in determinism mode. */
+       if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+               return -EINVAL;
+
+       /* The requested cap must lie within the GFX DPM table range. */
+       if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
+               (max > dpm_context->dpm_tables.gfx_table.max)) {
+               dev_warn(adev->dev,
+                               "Invalid max frequency %u MHz specified for determinism\n", max);
+               return -EINVAL;
+       }
+
+       /* Restore default min/max clocks and enable determinism */
+       min_clk = dpm_context->dpm_tables.gfx_table.min;
+       max_clk = dpm_context->dpm_tables.gfx_table.max;
+       ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+       if (!ret) {
+               /* Brief settle delay before handing the cap to PMFW. */
+               usleep_range(500, 1000);
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                               SMU_MSG_EnableDeterminism,
+                               max, NULL);
+               if (ret)
+                       dev_err(adev->dev,
+                                       "Failed to enable determinism at GFX clock %u MHz\n", max);
+       }
+
+       return ret;
+}
+
 static bool aldebaran_is_dpm_running(struct smu_context *smu)
 {
        int ret = 0;
@@ -1351,7 +1406,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
        .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
        .baco_is_support= aldebaran_is_baco_supported,
        .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
-       .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
+       .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
        .set_df_cstate = aldebaran_set_df_cstate,
        .allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
        .log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,