#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_smu.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
/* powerplay */
struct amd_powerplay powerplay;
-
- /* smu */
- struct smu_context smu;
-
- /* dpm */
struct amdgpu_pm pm;
u32 cg_flags;
u32 pg_flags;
#define __KGD_PP_INTERFACE_H__
extern const struct amdgpu_ip_block_version pp_smu_ip_block;
+extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
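
/*
 * A minimal sketch of how these exported declarations get consumed,
 * assuming the usual amdgpu_device_ip_block_add() registration path in
 * the discovery code. The function name and the exact IP_VERSION cases
 * are illustrative, not taken from this patch.
 */
static int example_set_smu_ip_block(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		return amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
	case IP_VERSION(12, 0, 0):
		return amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
	case IP_VERSION(13, 0, 2):
		return amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
	default:
		return -EINVAL;
	}
}
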
enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0,
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
+#include "amdgpu_smu.h"
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
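
/*
 * Legacy powerplay entry points like the macro above dispatch through
 * the pp_funcs table with pp_handle as opaque context. A hypothetical
 * caller, not from this patch:
 */
	if (adev->powerplay.pp_funcs->enable_bapm)
		amdgpu_dpm_enable_bapm(adev, true);
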
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev))
return smu_mode1_reset_is_support(smu);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev))
return smu_mode1_reset(smu);
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev))
return smu_allow_xgmi_power_down(smu, en);
mutex_unlock(&adev->pm.mutex);
if (is_support_sw_smu(adev))
- smu_set_ac_dc(&adev->smu);
+ smu_set_ac_dc(adev->powerplay.pp_handle);
}
}
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
- return smu_handle_passthrough_sbr(&adev->smu, enable);
+ return smu_handle_passthrough_sbr(adev->powerplay.pp_handle, enable);
}
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
- return smu_send_hbm_bad_pages_num(&adev->smu, size);
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_send_hbm_bad_pages_num(smu, size);
}
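
/*
 * The conversion pattern this series repeats: the smu_context is no
 * longer embedded in amdgpu_device, so every former &adev->smu user
 * dereferences the powerplay handle instead. A minimal sketch;
 * amdgpu_dpm_do_thing()/smu_do_thing() are placeholder names, not
 * functions from this patch.
 */
int amdgpu_dpm_do_thing(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	return smu_do_thing(smu); /* hypothetical SMU backend call */
}
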
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
switch (type) {
case PP_SCLK:
- return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max);
+ return smu_get_dpm_freq_range(adev->powerplay.pp_handle, SMU_SCLK, min, max);
default:
return -EINVAL;
}
uint32_t min,
uint32_t max)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
switch (type) {
case PP_SCLK:
- return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max);
+ return smu_set_soft_freq_range(smu, SMU_SCLK, min, max);
default:
return -EINVAL;
}
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
if (!is_support_sw_smu(adev))
return 0;
- return smu_write_watermarks_table(&adev->smu);
+ return smu_write_watermarks_table(smu);
}
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
enum smu_event_type event,
uint64_t event_arg)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- return smu_wait_for_event(&adev->smu, event, event_arg);
+ return smu_wait_for_event(smu, event, event_arg);
}
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- return smu_get_status_gfxoff(&adev->smu, value);
+ return smu_get_status_gfxoff(smu, value);
}
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
- return atomic64_read(&adev->smu.throttle_int_counter);
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return atomic64_read(&smu->throttle_int_counter);
}
/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
void *umc_ecc)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
- return smu_get_ecc_info(&adev->smu, umc_ecc);
+ return smu_get_ecc_info(smu, umc_ecc);
}
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct smu_context *smu = adev->powerplay.pp_handle;
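+ /* pp_handle backs either a hwmgr (legacy powerplay) or a smu_context
+ * (SW SMU); is_support_sw_smu() decides which view is valid here. */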
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (is_support_sw_smu(adev) && adev->smu.is_apu) ||
+ if ((is_support_sw_smu(adev) && smu->od_enabled) ||
+ (is_support_sw_smu(adev) && smu->is_apu) ||
(!is_support_sw_smu(adev) && hwmgr->od_enabled))
return true;
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
- return adev->smu.cpu_core_num;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu->cpu_core_num;
}
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
int limit_type = to_sensor_dev_attr(attr)->index;
return sysfs_emit(buf, "%s\n",
- limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+ limit_type == PP_PWR_TYPE_FAST ? "fastPPT" : "slowPPT");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
extern const struct amd_ip_funcs smu_ip_funcs;
-extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
-
bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);
bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
return false;
static int smu_set_funcs(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
static int smu_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu;
+
+ smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
+ if (!smu)
+ return -ENOMEM;
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
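
/*
 * For the accessors above to find this allocation, early init must also
 * publish it through the powerplay handle. A sketch of how the tail of
 * smu_early_init() is assumed to end (swsmu_pm_funcs being the SW SMU
 * amd_pm_funcs table):
 */
	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
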
static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
smu_set_fine_grain_gfx_freq_parameters(smu);
smu_get_fan_parameters(smu);
- smu_handle_task(&adev->smu,
+ smu_handle_task(smu,
smu->smu_dpm.dpm_level,
AMD_PP_TASK_COMPLETE_INIT,
false);
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
static int smu_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
ret = smu_smc_table_sw_fini(smu);
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
smu->pm_enabled = false;
if (smu->is_apu) {
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(smu, true);
}
if (!smu->pm_enabled)
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
return smu_smc_hw_cleanup(smu);
}
+static void smu_late_fini(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
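+ /* Pairs with the kzalloc() in smu_early_init(); the context must stay
+ * alive until every pp_handle user has been torn down. */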
+ kfree(smu);
+}
+
static int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
static int smu_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- smu_set_gfx_cgpg(&adev->smu, false);
+ smu_set_gfx_cgpg(smu, false);
return 0;
}
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
return ret;
}
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(smu, true);
smu->disable_uclk_switch = 0;
.sw_fini = smu_sw_fini,
.hw_init = smu_hw_init,
.hw_fini = smu_hw_fini,
+ .late_fini = smu_late_fini,
.suspend = smu_suspend,
.resume = smu_resume,
.is_idle = NULL,
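
/*
 * .late_fini runs from the IP-block teardown loop after every block's
 * sw_fini, making it a safe point to free the context. A simplified
 * sketch of the caller's shape; the name and the loop details are
 * assumed, not taken from this patch.
 */
static void example_ip_late_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		const struct amd_ip_funcs *funcs = adev->ip_blocks[i].version->funcs;

		if (funcs->late_fini)
			funcs->late_fini((void *)adev);
	}
}
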
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
unsigned char *buf;
int r;
loff_t *pos)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!filp->private_data)
{
#if defined(CONFIG_DEBUG_FS)
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu->stb_context.stb_buf_size)
return;
&smu_stb_debugfs_fops,
smu->stb_context.stb_buf_size);
#endif
-
}
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
struct i2c_msg *msg, int num_msgs)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&smu->mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&smu->mutex);
if (r)
goto fail;
struct i2c_msg *msg, int num_msgs)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&smu->mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&smu->mutex);
if (r)
goto fail;
struct i2c_msg *msg, int num_msgs)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&smu->mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&smu->mutex);
if (r)
goto fail;
unsigned type,
enum amdgpu_interrupt_state state)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
struct i2c_msg *msg, int num_msgs)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&smu->mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&smu->mutex);
if (r)
goto fail;
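
/*
 * The four i2c_xfer hunks above repeat the same critical section; a
 * hypothetical helper capturing the shared pattern (not part of this
 * patch):
 */
static int smu_i2c_submit_request(struct smu_context *smu, SwI2cRequest_t *req)
{
	int r;

	mutex_lock(&smu->mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&smu->mutex);

	return r;
}
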
unsigned type,
enum amdgpu_interrupt_state state)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
switch (ctxid) {
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+ smu_v13_0_ack_ac_dc_interrupt(smu);
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+ smu_v13_0_ack_ac_dc_interrupt(smu);
break;
case 0x7:
/*