AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
+ AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM = 0x200,
};
enum amd_pm_state_type {
(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+ (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
"unknown");
}
level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
} else {
return -EINVAL;
}
*/
#define AMDGPU_MASK_BUF_MAX (32 * 13)
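+/*
+ * Parse a sysfs input string of the form "min <value> max <value>".
+ * Either pair may be omitted (e.g. "max 1500"); a value left at zero is
+ * treated as unspecified by the caller.
+ */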
+static int amdgpu_read_clk(const char *buf,
+ size_t count,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret;
+ char *tmp;
+ char *token = NULL;
+ char *tag;
+ char *value;
+ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ size_t bytes;
+ int i = 0;
+
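+ /* Copy at most AMDGPU_MASK_BUF_MAX bytes so strsep() sees a bounded, NUL-terminated buffer */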
+ bytes = min(count, sizeof(buf_cpy) - 1);
+ memcpy(buf_cpy, buf, bytes);
+ buf_cpy[bytes] = '\0';
+ tmp = buf_cpy;
+
+ *min = *max = 0;
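+ /* Parse up to two "tag value" pairs, accepted in either order */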
+ while (i < 2) {
+ ret = -EINVAL;
+ token = strsep(&tmp, delimiter);
+ if (!token || !*token)
+ break;
+ tag = token;
+
+ token = strsep(&tmp, delimiter);
+ if (!token || !*token)
+ break;
+ value = token;
+
+ if (!strncmp(tag, "min", strlen("min")))
+ ret = kstrtou32(value, 0, min);
+ else if (!strncmp(tag, "max", strlen("max")))
+ ret = kstrtou32(value, 0, max);
+
+ /* Reject unknown tags and malformed values instead of silently ignoring them */
+ if (ret)
+ return ret;
+ ++i;
+ }
+
+ /* should get a non-zero value for min or max */
+ if (!*min && !*max)
+ return -EINVAL;
+
+ return 0;
+}
+
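+/*
+ * Apply a soft min/max frequency range for a clock domain through the
+ * SW SMU interface.
+ */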
+static int amdgpu_set_clk_minmax(struct amdgpu_device *adev,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret;
+
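+ /* Soft clock limits are only supported on SW SMU ASICs and not under SR-IOV */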
+ if (!is_support_sw_smu(adev) || amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return ret;
+ }
+
+ ret = smu_set_soft_freq_range(&adev->smu, clk_type, min, max);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
+ uint32_t min;
+ uint32_t max;
if (amdgpu_in_reset(adev))
return -EPERM;
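+ /* A "min xxx max xxx" string takes precedence over a clock level mask */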
+ ret = amdgpu_read_clk(buf, count, &min, &max);
+ if (!ret) {
+ ret = amdgpu_set_clk_minmax(adev, SMU_GFXCLK, min, max);
+ return ret ? ret : count;
+ }
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
smu_dpm_ctx->dpm_level = level;
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
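+ /* Leave the workload profile alone in manual and determinism modes */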
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload = smu->workload_setting[index];
workload = smu->workload_setting[index];
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
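+ /* Likewise, skip the power profile bump for manual and determinism modes */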
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, &workload, 0);
mutex_unlock(&smu->mutex);
dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
return -EINVAL;
}
-
power_limit = pptable->PptLimit;
}
static int aldebaran_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+
+ /* Disable determinism if switching to another mode */
+ if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
+ (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+
switch (level) {
+
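+ /* Nothing to program here; determinism takes effect on the next soft GFX freq range update */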
+ case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+ return 0;
+
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
return smu_v13_0_set_performance_level(smu, level);
}
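+/*
+ * In determinism mode, restore the default GFX min/max before asking the
+ * firmware to hold a deterministic sustained clock at 'max'.
+ */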
+static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+ return -EINVAL;
+
+ /* Soft limits are only adjustable here in determinism mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ return -EINVAL;
+
+ if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
+ (max > dpm_context->dpm_tables.gfx_table.max)) {
+ dev_warn(adev->dev,
+ "Invalid max frequency %u MHz specified for determinism\n", max);
+ return -EINVAL;
+ }
+
+ /* Restore default min/max clocks, then enable determinism at 'max' */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ if (!ret) {
+ /* Brief delay between restoring the range and enabling determinism */
+ usleep_range(500, 1000);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnableDeterminism,
+ max, NULL);
+ if (ret)
+ dev_err(adev->dev,
+ "Failed to enable determinism at GFX clock %u MHz\n", max);
+ }
+
+ return ret;
+}
+
static bool aldebaran_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = aldebaran_is_baco_supported,
.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
- .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
+ .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.set_df_cstate = aldebaran_set_df_cstate,
.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,