Commit 1c58267c authored by Matt Coffin, committed by Alex Deucher

drm/amdgpu/powerplay: Refactor SMU message handling for safety

Move the responsibility for reading argument registers into the
smu_send_smc_msg* implementations, so that adding a message-sending lock
to protect the SMU registers will result in the lock still being held
when the argument is read.

v2: transition smu_v12_0, its ASICs, and vega20
Signed-off-by: Matt Coffin <mcoffin13@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2622e2ae
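For readers skimming the diff, the change is essentially a calling-convention change. A condensed illustration follows (a sketch drawn from the smu_get_smc_version() hunk below, not standalone code): instead of sending a message and then reading the SMU argument register in a separate smu_read_smc_arg() call, callers now pass an optional read_arg pointer and the send implementation performs the read itself, so a future message-sending lock can cover both steps.

	/* Old two-step pattern: any lock taken inside smu_send_smc_msg()
	 * would be dropped before the argument register is read, so another
	 * message could clobber the register in between. */
	ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, if_version);

	/* New single-step pattern: a non-NULL read_arg makes the send
	 * implementation read the argument register itself, while any
	 * message lock is still held. */
	ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);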
@@ -121,20 +121,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
 	if (enabled) {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
-						  feature_low);
+						  feature_low, NULL);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
-						  feature_high);
+						  feature_high, NULL);
 		if (ret)
 			return ret;
 	} else {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
-						  feature_low);
+						  feature_low, NULL);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
-						  feature_high);
+						  feature_high, NULL);
 		if (ret)
 			return ret;
 	}
@@ -195,21 +195,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
 		return -EINVAL;
 	if (if_version) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
-		if (ret)
-			return ret;
-		ret = smu_read_smc_arg(smu, if_version);
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
 		if (ret)
 			return ret;
 	}
 	if (smu_version) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
-		if (ret)
-			return ret;
-		ret = smu_read_smc_arg(smu, smu_version);
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
 		if (ret)
 			return ret;
 	}
@@ -251,7 +243,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 	if (max > 0) {
 		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
-						  param);
+						  param, NULL);
 		if (ret)
 			return ret;
 	}
@@ -259,7 +251,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 	if (min > 0) {
 		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
-						  param);
+						  param, NULL);
 		if (ret)
 			return ret;
 	}
@@ -335,12 +327,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
 	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
-	ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
-					  param);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &param);
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+					  param, &param);
 	if (ret)
 		return ret;
@@ -542,7 +530,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
 					  SMU_MSG_TransferTableDram2Smu :
 					  SMU_MSG_TransferTableSmu2Dram,
-					  table_id | ((argument & 0xFFFF) << 16));
+					  table_id | ((argument & 0xFFFF) << 16),
+					  NULL);
 	if (ret)
 		return ret;
@@ -1992,7 +1981,7 @@ int smu_set_mp1_state(struct smu_context *smu,
 		return 0;
 	}
-	ret = smu_send_smc_msg(smu, msg);
+	ret = smu_send_smc_msg(smu, msg, NULL);
 	if (ret)
 		pr_err("[PrepareMp1] Failed!\n");
@@ -2670,12 +2659,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
 	return ret;
 }
-int smu_send_smc_msg(struct smu_context *smu,
-		     enum smu_message_type msg)
-{
-	int ret;
-	ret = smu_send_smc_msg_with_param(smu, msg, 0);
-	return ret;
-}
@@ -374,13 +374,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_GetDpmFreqByIndex,
-					  (clk_id << 16 | 0xFF));
+					  (clk_id << 16 | 0xFF),
+					  &num_of_levels);
 	if (ret) {
 		pr_err("[%s] failed to get dpm levels!\n", __func__);
 		return ret;
 	}
-	smu_read_smc_arg(smu, &num_of_levels);
 	if (!num_of_levels) {
 		pr_err("[%s] number of clk levels is invalid!\n", __func__);
 		return -EINVAL;
@@ -390,12 +390,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
 	for (i = 0; i < num_of_levels; i++) {
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_GetDpmFreqByIndex,
-						  (clk_id << 16 | i));
+						  (clk_id << 16 | i),
+						  &clk);
 		if (ret) {
 			pr_err("[%s] failed to get dpm freq by index!\n", __func__);
 			return ret;
 		}
-		smu_read_smc_arg(smu, &clk);
 		if (!clk) {
 			pr_err("[%s] clk value is invalid!\n", __func__);
 			return -EINVAL;
@@ -553,13 +553,13 @@ static int arcturus_run_btc(struct smu_context *smu)
 {
 	int ret = 0;
-	ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+	ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
 	if (ret) {
 		pr_err("RunAfllBtc failed!\n");
 		return ret;
 	}
-	return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+	return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
 }
 static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -744,7 +744,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_GFXCLK << 16) | (freq & 0xffff));
+			(PPCLK_GFXCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s gfxclk !\n",
 						max ? "max" : "min");
@@ -759,7 +760,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_UCLK << 16) | (freq & 0xffff));
+			(PPCLK_UCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s memclk !\n",
 						max ? "max" : "min");
@@ -774,7 +776,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_SOCCLK << 16) | (freq & 0xffff));
+			(PPCLK_SOCCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s socclk !\n",
 						max ? "max" : "min");
@@ -1289,12 +1292,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 			return -EINVAL;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
-			power_src << 16);
+			power_src << 16, &asic_default_power_limit);
 		if (ret) {
 			pr_err("[%s] get PPT limit failed!", __func__);
 			return ret;
 		}
-		smu_read_smc_arg(smu, &asic_default_power_limit);
 	} else {
 		/* the last hope to figure out the ppt limit */
 		if (!pptable) {
@@ -1498,7 +1500,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetWorkloadMask,
-					  1 << workload_type);
+					  1 << workload_type,
+					  NULL);
 	if (ret) {
 		pr_err("Fail to set workload type %d\n", workload_type);
 		return ret;
@@ -2233,7 +2236,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
 		return -EINVAL;
 	}
-	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 static const struct pptable_funcs arcturus_ppt_funcs = {
......
@@ -514,7 +514,7 @@ struct pptable_funcs {
 	int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
 	int (*system_features_control)(struct smu_context *smu, bool en);
 	int (*send_smc_msg_with_param)(struct smu_context *smu,
-				       enum smu_message_type msg, uint32_t param);
+				       enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
 	int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
 	int (*init_display_count)(struct smu_context *smu, uint32_t count);
 	int (*set_allowed_mask)(struct smu_context *smu);
......
@@ -138,6 +138,8 @@ enum smu_v11_0_baco_seq {
 	BACO_SEQ_COUNT,
 };
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
 int smu_v11_0_init_microcode(struct smu_context *smu);
 int smu_v11_0_load_microcode(struct smu_context *smu);
@@ -182,9 +184,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
 int
 smu_v11_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param);
+			      uint32_t param,
+			      uint32_t *read_arg);
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
......
@@ -47,7 +47,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu);
 int
 smu_v12_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param);
+			      uint32_t param,
+			      uint32_t *read_arg);
 int smu_v12_0_check_fw_status(struct smu_context *smu);
......
@@ -661,14 +661,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	if (enable) {
 		/* vcn dpm on is a prerequisite for vcn power gate messages */
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->vcn_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
 			if (ret)
 				return ret;
 		}
@@ -686,14 +686,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 	if (enable) {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->jpeg_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
 			if (ret)
 				return ret;
 		}
@@ -1042,7 +1042,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
 	int ret = 0;
 	uint32_t max_freq = 0;
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
 	if (ret)
 		return ret;
@@ -1066,7 +1066,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
-						  smu->display_config->num_display);
+						  smu->display_config->num_display,
+						  NULL);
 		if (ret)
 			return ret;
 	}
@@ -1391,7 +1392,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 	if (workload_type < 0)
 		return -EINVAL;
 	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type);
+				    1 << workload_type, NULL);
 	return ret;
 }
@@ -1456,7 +1457,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
 	if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_SetMinDeepSleepDcefclk,
-						  min_clocks.dcef_clock_in_sr/100);
+						  min_clocks.dcef_clock_in_sr/100,
+						  NULL);
 		if (ret) {
 			pr_err("Attempt to set divider for DCEFCLK Failed!");
 			return ret;
@@ -1859,12 +1861,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
 			return -EINVAL;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
-			power_src << 16);
+			power_src << 16, &asic_default_power_limit);
 		if (ret) {
 			pr_err("[%s] get PPT limit failed!", __func__);
 			return ret;
 		}
-		smu_read_smc_arg(smu, &asic_default_power_limit);
 	} else {
 		/* the last hope to figure out the ppt limit */
 		if (!pptable) {
@@ -1904,7 +1905,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 					pptable->PcieLaneCount[i] : pcie_width_cap);
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_OverridePcieParameters,
-						  smu_pcie_arg);
+						  smu_pcie_arg,
+						  NULL);
 		if (ret)
 			return ret;
@@ -1950,13 +1952,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_GetVoltageByDpm,
-					  param);
+					  param,
+					  &value);
 	if (ret) {
 		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
 		return ret;
 	}
-	smu_read_smc_arg(smu, &value);
 	*voltage = (uint16_t)value;
 	return 0;
@@ -2213,7 +2215,7 @@ static int navi10_run_btc(struct smu_context *smu)
 {
 	int ret = 0;
-	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
 	if (ret)
 		pr_err("RunBtc failed!\n");
@@ -2225,9 +2227,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
 	int result = 0;
 	if (!enable)
-		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
 	else
-		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
 	return result;
 }
......
@@ -342,14 +342,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	if (enable) {
 		/* vcn dpm on is a prerequisite for vcn power gate messages */
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->vcn_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
 			if (ret)
 				return ret;
 		}
@@ -367,14 +367,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 	if (enable) {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->jpeg_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
 			if (ret)
 				return ret;
 		}
@@ -622,22 +622,24 @@ static int renoir_force_clk_levels(struct smu_context *smu,
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
 					soft_max_level == 0 ? min_freq :
-					soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+					soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
+					NULL);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
 					soft_min_level == 2 ? max_freq :
-					soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+					soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
+					NULL);
 		if (ret)
 			return ret;
 		break;
 	case SMU_SOCCLK:
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
 		if (ret)
 			return ret;
 		break;
@@ -645,10 +647,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
 	case SMU_FCLK:
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
 		if (ret)
 			return ret;
 		break;
@@ -681,7 +683,8 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
 	}
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-					  1 << workload_type);
+					  1 << workload_type,
+					  NULL);
 	if (ret) {
 		pr_err_once("Fail to set workload type %d\n", workload_type);
 		return ret;
......
@@ -79,10 +79,13 @@
 #define smu_set_default_od_settings(smu, initialize) \
 	((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
-#define smu_send_smc_msg_with_param(smu, msg, param) \
-	((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
+	((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)
+static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) {
+	return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
+}
 #define smu_read_smc_arg(smu, arg) \
 	((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
 #define smu_alloc_dpm_context(smu) \
......
@@ -78,7 +78,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
 int
 smu_v12_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param)
+			      uint32_t param,
+			      uint32_t *read_arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;
@@ -101,9 +102,19 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
 	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
 	ret = smu_v12_0_wait_for_response(smu);
-	if (ret)
+	if (ret) {
 		pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
 		       index, ret, param);
+		return ret;
+	}
+	if (read_arg) {
+		ret = smu_v12_0_read_arg(smu, read_arg);
+		if (ret) {
+			pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
+			       index, ret, param);
+			return ret;
+		}
+	}
 	return ret;
 }
@@ -163,9 +174,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 		return 0;
 	if (gate)
-		return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
 	else
-		return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
 }
 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
@@ -174,9 +185,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
 		return 0;
 	if (gate)
-		return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
 	else
-		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL);
 }
 int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
@@ -185,9 +196,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
 		return 0;
 	if (gate)
-		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
 	else
-		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
 }
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -196,7 +207,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 		return 0;
 	return smu_v12_0_send_msg_with_param(smu,
-					     SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+					     SMU_MSG_SetGfxCGPG,
+					     enable ? 1 : 0,
+					     NULL);
 }
 int smu_v12_0_read_sensor(struct smu_context *smu,
@@ -262,10 +275,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
 	int ret = 0, timeout = 500;
 	if (enable) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
 	} else {
-		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
 		/* confirm gfx is back to "on" state, timeout is 0.5 second */
 		while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -331,17 +344,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu,
 	if (!feature_mask || num < 2)
 		return -EINVAL;
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &feature_mask_high);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
 	if (ret)
 		return ret;
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &feature_mask_low);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
 	if (ret)
 		return ret;
@@ -388,14 +395,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 		switch (clk_type) {
 		case SMU_GFXCLK:
 		case SMU_SCLK:
-			ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+			ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
 			if (ret) {
 				pr_err("Attempt to get max GX frequency from SMC Failed !\n");
 				goto failed;
 			}
-			ret = smu_read_smc_arg(smu, max);
-			if (ret)
-				goto failed;
 			break;
 		case SMU_UCLK:
 		case SMU_FCLK:
@@ -419,14 +423,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 		switch (clk_type) {
 		case SMU_GFXCLK:
 		case SMU_SCLK:
-			ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+			ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
 			if (ret) {
 				pr_err("Attempt to get min GX frequency from SMC Failed !\n");
 				goto failed;
 			}
-			ret = smu_read_smc_arg(smu, min);
-			if (ret)
-				goto failed;
 			break;
 		case SMU_UCLK:
 		case SMU_FCLK:
@@ -450,7 +451,7 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 }
 int smu_v12_0_mode2_reset(struct smu_context *smu){
-	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
 }
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -464,39 +465,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 	switch (clk_type) {
 	case SMU_GFXCLK:
 	case SMU_SCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
 		if (ret)
 			return ret;
 		break;
 	case SMU_FCLK:
 	case SMU_MCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
 		if (ret)
 			return ret;
 		break;
 	case SMU_SOCCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
 		if (ret)
			return ret;
 		break;
 	case SMU_VCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
 		if (ret)
 			return ret;
 		break;
@@ -515,11 +516,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
 	if (driver_table->mc_address) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetDriverDramAddrHigh,
-				upper_32_bits(driver_table->mc_address));
+				upper_32_bits(driver_table->mc_address),
+				NULL);
 		if (!ret)
 			ret = smu_send_smc_msg_with_param(smu,
 					SMU_MSG_SetDriverDramAddrLow,
-					lower_32_bits(driver_table->mc_address));
+					lower_32_bits(driver_table->mc_address),
+					NULL);
 	}
 	return ret;
......
@@ -587,7 +587,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu)
 static int vega20_run_btc_afll(struct smu_context *smu)
 {
-	return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+	return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
 }
 #define FEATURE_MASK(feature) (1ULL << feature)
@@ -670,13 +670,13 @@ vega20_set_single_dpm_table(struct smu_context *smu,
 	ret = smu_send_smc_msg_with_param(smu,
 			SMU_MSG_GetDpmFreqByIndex,
-			(clk_id << 16 | 0xFF));
+			(clk_id << 16 | 0xFF),
+			&num_of_levels);
 	if (ret) {
 		pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
 		return ret;
 	}
-	smu_read_smc_arg(smu, &num_of_levels);
 	if (!num_of_levels) {
 		pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
 		return -EINVAL;
@@ -687,12 +687,12 @@ vega20_set_single_dpm_table(struct smu_context *smu,
 	for (i = 0; i < num_of_levels; i++) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_GetDpmFreqByIndex,
-				(clk_id << 16 | i));
+				(clk_id << 16 | i),
+				&clk);
 		if (ret) {
 			pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
 			return ret;
 		}
-		smu_read_smc_arg(smu, &clk);
 		if (!clk) {
 			pr_err("[GetDpmFreqByIndex] clk value is invalid!");
 			return -EINVAL;
@@ -1200,7 +1200,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_GFXCLK << 16) | (freq & 0xffff));
+			(PPCLK_GFXCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s gfxclk !\n",
 						max ? "max" : "min");
@@ -1215,7 +1216,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_UCLK << 16) | (freq & 0xffff));
+			(PPCLK_UCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s memclk !\n",
 						max ? "max" : "min");
@@ -1230,7 +1232,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_SOCCLK << 16) | (freq & 0xffff));
+			(PPCLK_SOCCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s socclk !\n",
 						max ? "max" : "min");
@@ -1245,7 +1248,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_FCLK << 16) | (freq & 0xffff));
+			(PPCLK_FCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s fclk !\n",
 						max ? "max" : "min");
@@ -1260,7 +1264,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 		if (!max) {
 			ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetHardMinByFreq,
-				(PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+				(PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+				NULL);
 			if (ret) {
 				pr_err("Failed to set hard min dcefclk !\n");
 				return ret;
@@ -1421,7 +1426,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
 		}
 		ret = smu_send_smc_msg_with_param(smu,
-				SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+				SMU_MSG_SetMinLinkDpmByIndex,
+				soft_min_level,
+				NULL);
 		if (ret)
 			pr_err("Failed to set min link dpm level!\n");
@@ -1477,13 +1484,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
 	ret = smu_send_smc_msg_with_param(smu,
 			SMU_MSG_GetAVFSVoltageByDpm,
-			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+			voltage);
 	if (ret) {
 		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
 		return ret;
 	}
-	smu_read_smc_arg(smu, voltage);
 	*voltage = *voltage / VOLTAGE_SCALE;
 	return 0;
@@ -1956,8 +1963,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
 	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
-	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type);
+	smu_send_smc_msg_with_param(smu,
+				    SMU_MSG_SetWorkloadMask,
+				    1 << workload_type,
+				    NULL);
 	return ret;
 }
@@ -2029,7 +2038,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetHardMinByFreq,
-				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+				NULL);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -2047,7 +2057,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
 	if (!smu->smu_dpm.dpm_context)
 		return -EINVAL;
-	smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+	smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
 	ret = vega20_set_uclk_to_highest_dpm_level(smu,
 						   &dpm_table->mem_table);
 	if (ret)
@@ -2074,7 +2084,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 		smu_send_smc_msg_with_param(smu,
 					    SMU_MSG_NumOfDisplays,
-					    smu->display_config->num_display);
+					    smu->display_config->num_display,
+					    NULL);
 	}
 	return ret;
@@ -2247,7 +2258,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
 	if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_SetMinDeepSleepDcefclk,
-						  min_clocks.dcef_clock_in_sr/100);
+						  min_clocks.dcef_clock_in_sr/100,
+						  NULL);
 		if (ret) {
 			pr_err("Attempt to set divider for DCEFCLK Failed!");
 			return ret;
@@ -2262,7 +2274,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
 		memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_SetHardMinByFreq,
-						  (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+						  (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+						  NULL);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -2853,8 +2866,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
 	struct smu_table_context *table_context = &smu->smu_table;
 	PPTable_t *pptable = table_context->driver_pptable;
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
-			(uint32_t)pptable->FanTargetTemperature);
+	ret = smu_send_smc_msg_with_param(smu,
+			SMU_MSG_SetFanTemperatureTarget,
+			(uint32_t)pptable->FanTargetTemperature,
+			NULL);
 	return ret;
 }
@@ -2864,15 +2879,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
 {
 	int ret;
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
 	if (ret) {
 		pr_err("Attempt to get current RPM from SMC Failed!\n");
 		return ret;
 	}
-	smu_read_smc_arg(smu, speed);
 	return 0;
 }
@@ -3137,7 +3150,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
 		return -EINVAL;
 	}
-	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,7 +3168,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
 					pptable->PcieLaneCount[i] : pcie_width_cap);
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_OverridePcieParameters,
-						  smu_pcie_arg);
+						  smu_pcie_arg,
+						  NULL);
 	}
 	return ret;
......