Commit ebfc2533 authored by Evan Quan's avatar Evan Quan Committed by Alex Deucher

drm/amd/pm: do not expose the smu_context structure used internally in power

This hides the power implementation details. And, as was done for the
powerplay framework, we hook the smu_context to adev->powerplay.pp_handle.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7689dab4
...@@ -99,7 +99,6 @@ ...@@ -99,7 +99,6 @@
#include "amdgpu_gem.h" #include "amdgpu_gem.h"
#include "amdgpu_doorbell.h" #include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
#include "amdgpu_smu.h"
#include "amdgpu_discovery.h" #include "amdgpu_discovery.h"
#include "amdgpu_mes.h" #include "amdgpu_mes.h"
#include "amdgpu_umc.h" #include "amdgpu_umc.h"
...@@ -949,11 +948,6 @@ struct amdgpu_device { ...@@ -949,11 +948,6 @@ struct amdgpu_device {
/* powerplay */ /* powerplay */
struct amd_powerplay powerplay; struct amd_powerplay powerplay;
/* smu */
struct smu_context smu;
/* dpm */
struct amdgpu_pm pm; struct amdgpu_pm pm;
u32 cg_flags; u32 cg_flags;
u32 pg_flags; u32 pg_flags;
......
...@@ -25,6 +25,9 @@ ...@@ -25,6 +25,9 @@
#define __KGD_PP_INTERFACE_H__ #define __KGD_PP_INTERFACE_H__
extern const struct amdgpu_ip_block_version pp_smu_ip_block; extern const struct amdgpu_ip_block_version pp_smu_ip_block;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
enum smu_event_type { enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0, SMU_EVENT_RESET_COMPLETE = 0,
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include "amdgpu_display.h" #include "amdgpu_display.h"
#include "hwmgr.h" #include "hwmgr.h"
#include <linux/power_supply.h> #include <linux/power_supply.h>
#include "amdgpu_smu.h"
#define amdgpu_dpm_enable_bapm(adev, e) \ #define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
...@@ -213,7 +214,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) ...@@ -213,7 +214,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
return smu_mode1_reset_is_support(smu); return smu_mode1_reset_is_support(smu);
...@@ -223,7 +224,7 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) ...@@ -223,7 +224,7 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
return smu_mode1_reset(smu); return smu_mode1_reset(smu);
...@@ -276,7 +277,7 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, ...@@ -276,7 +277,7 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
return smu_allow_xgmi_power_down(smu, en); return smu_allow_xgmi_power_down(smu, en);
...@@ -341,7 +342,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) ...@@ -341,7 +342,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
if (is_support_sw_smu(adev)) if (is_support_sw_smu(adev))
smu_set_ac_dc(&adev->smu); smu_set_ac_dc(adev->powerplay.pp_handle);
} }
} }
...@@ -426,12 +427,14 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio ...@@ -426,12 +427,14 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{ {
return smu_handle_passthrough_sbr(&adev->smu, enable); return smu_handle_passthrough_sbr(adev->powerplay.pp_handle, enable);
} }
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{ {
return smu_send_hbm_bad_pages_num(&adev->smu, size); struct smu_context *smu = adev->powerplay.pp_handle;
return smu_send_hbm_bad_pages_num(smu, size);
} }
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
...@@ -444,7 +447,7 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, ...@@ -444,7 +447,7 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
switch (type) { switch (type) {
case PP_SCLK: case PP_SCLK:
return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max); return smu_get_dpm_freq_range(adev->powerplay.pp_handle, SMU_SCLK, min, max);
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -455,12 +458,14 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, ...@@ -455,12 +458,14 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
uint32_t min, uint32_t min,
uint32_t max) uint32_t max)
{ {
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev)) if (!is_support_sw_smu(adev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
switch (type) { switch (type) {
case PP_SCLK: case PP_SCLK:
return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max); return smu_set_soft_freq_range(smu, SMU_SCLK, min, max);
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -468,33 +473,41 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, ...@@ -468,33 +473,41 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{ {
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev)) if (!is_support_sw_smu(adev))
return 0; return 0;
return smu_write_watermarks_table(&adev->smu); return smu_write_watermarks_table(smu);
} }
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
enum smu_event_type event, enum smu_event_type event,
uint64_t event_arg) uint64_t event_arg)
{ {
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev)) if (!is_support_sw_smu(adev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
return smu_wait_for_event(&adev->smu, event, event_arg); return smu_wait_for_event(smu, event, event_arg);
} }
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{ {
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev)) if (!is_support_sw_smu(adev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
return smu_get_status_gfxoff(&adev->smu, value); return smu_get_status_gfxoff(smu, value);
} }
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{ {
return atomic64_read(&adev->smu.throttle_int_counter); struct smu_context *smu = adev->powerplay.pp_handle;
return atomic64_read(&smu->throttle_int_counter);
} }
/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
...@@ -516,10 +529,12 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, ...@@ -516,10 +529,12 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
void *umc_ecc) void *umc_ecc)
{ {
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev)) if (!is_support_sw_smu(adev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
return smu_get_ecc_info(&adev->smu, umc_ecc); return smu_get_ecc_info(smu, umc_ecc);
} }
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
...@@ -943,9 +958,10 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, ...@@ -943,9 +958,10 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{ {
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
struct smu_context *smu = adev->powerplay.pp_handle;
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || if ((is_support_sw_smu(adev) && smu->od_enabled) ||
(is_support_sw_smu(adev) && adev->smu.is_apu) || (is_support_sw_smu(adev) && smu->is_apu) ||
(!is_support_sw_smu(adev) && hwmgr->od_enabled)) (!is_support_sw_smu(adev) && hwmgr->od_enabled))
return true; return true;
...@@ -968,7 +984,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, ...@@ -968,7 +984,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{ {
return adev->smu.cpu_core_num; struct smu_context *smu = adev->powerplay.pp_handle;
return smu->cpu_core_num;
} }
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
......
...@@ -2869,7 +2869,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, ...@@ -2869,7 +2869,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
int limit_type = to_sensor_dev_attr(attr)->index; int limit_type = to_sensor_dev_attr(attr)->index;
return sysfs_emit(buf, "%s\n", return sysfs_emit(buf, "%s\n",
limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT"); limit_type == PP_PWR_TYPE_FAST ? "fastPPT" : "slowPPT");
} }
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
......
...@@ -1390,10 +1390,6 @@ int smu_mode1_reset(struct smu_context *smu); ...@@ -1390,10 +1390,6 @@ int smu_mode1_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs; extern const struct amd_ip_funcs smu_ip_funcs;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
bool is_support_sw_smu(struct amdgpu_device *adev); bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev); bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu); int smu_write_watermarks_table(struct smu_context *smu);
......
...@@ -468,7 +468,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) ...@@ -468,7 +468,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_cclk_dpm(struct amdgpu_device *adev) bool is_support_cclk_dpm(struct amdgpu_device *adev)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
return false; return false;
...@@ -572,7 +572,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) ...@@ -572,7 +572,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
static int smu_set_funcs(struct amdgpu_device *adev) static int smu_set_funcs(struct amdgpu_device *adev)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true; smu->od_enabled = true;
...@@ -624,7 +624,11 @@ static int smu_set_funcs(struct amdgpu_device *adev) ...@@ -624,7 +624,11 @@ static int smu_set_funcs(struct amdgpu_device *adev)
static int smu_early_init(void *handle) static int smu_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu;
smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
if (!smu)
return -ENOMEM;
smu->adev = adev; smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm; smu->pm_enabled = !!amdgpu_dpm;
...@@ -684,7 +688,7 @@ static int smu_set_default_dpm_table(struct smu_context *smu) ...@@ -684,7 +688,7 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
static int smu_late_init(void *handle) static int smu_late_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0; int ret = 0;
smu_set_fine_grain_gfx_freq_parameters(smu); smu_set_fine_grain_gfx_freq_parameters(smu);
...@@ -730,7 +734,7 @@ static int smu_late_init(void *handle) ...@@ -730,7 +734,7 @@ static int smu_late_init(void *handle)
smu_get_fan_parameters(smu); smu_get_fan_parameters(smu);
smu_handle_task(&adev->smu, smu_handle_task(smu,
smu->smu_dpm.dpm_level, smu->smu_dpm.dpm_level,
AMD_PP_TASK_COMPLETE_INIT, AMD_PP_TASK_COMPLETE_INIT,
false); false);
...@@ -1020,7 +1024,7 @@ static void smu_interrupt_work_fn(struct work_struct *work) ...@@ -1020,7 +1024,7 @@ static void smu_interrupt_work_fn(struct work_struct *work)
static int smu_sw_init(void *handle) static int smu_sw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
int ret; int ret;
smu->pool_size = adev->pm.smu_prv_buffer_size; smu->pool_size = adev->pm.smu_prv_buffer_size;
...@@ -1095,7 +1099,7 @@ static int smu_sw_init(void *handle) ...@@ -1095,7 +1099,7 @@ static int smu_sw_init(void *handle)
static int smu_sw_fini(void *handle) static int smu_sw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
int ret; int ret;
ret = smu_smc_table_sw_fini(smu); ret = smu_smc_table_sw_fini(smu);
...@@ -1330,7 +1334,7 @@ static int smu_hw_init(void *handle) ...@@ -1330,7 +1334,7 @@ static int smu_hw_init(void *handle)
{ {
int ret; int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
smu->pm_enabled = false; smu->pm_enabled = false;
...@@ -1346,7 +1350,7 @@ static int smu_hw_init(void *handle) ...@@ -1346,7 +1350,7 @@ static int smu_hw_init(void *handle)
if (smu->is_apu) { if (smu->is_apu) {
smu_dpm_set_vcn_enable(smu, true); smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true); smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(&adev->smu, true); smu_set_gfx_cgpg(smu, true);
} }
if (!smu->pm_enabled) if (!smu->pm_enabled)
...@@ -1506,7 +1510,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) ...@@ -1506,7 +1510,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
static int smu_hw_fini(void *handle) static int smu_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0; return 0;
...@@ -1525,6 +1529,14 @@ static int smu_hw_fini(void *handle) ...@@ -1525,6 +1529,14 @@ static int smu_hw_fini(void *handle)
return smu_smc_hw_cleanup(smu); return smu_smc_hw_cleanup(smu);
} }
static void smu_late_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct smu_context *smu = adev->powerplay.pp_handle;
kfree(smu);
}
static int smu_reset(struct smu_context *smu) static int smu_reset(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
...@@ -1552,7 +1564,7 @@ static int smu_reset(struct smu_context *smu) ...@@ -1552,7 +1564,7 @@ static int smu_reset(struct smu_context *smu)
static int smu_suspend(void *handle) static int smu_suspend(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
int ret; int ret;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
...@@ -1569,7 +1581,7 @@ static int smu_suspend(void *handle) ...@@ -1569,7 +1581,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
smu_set_gfx_cgpg(&adev->smu, false); smu_set_gfx_cgpg(smu, false);
return 0; return 0;
} }
...@@ -1578,7 +1590,7 @@ static int smu_resume(void *handle) ...@@ -1578,7 +1590,7 @@ static int smu_resume(void *handle)
{ {
int ret; int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0; return 0;
...@@ -1600,7 +1612,7 @@ static int smu_resume(void *handle) ...@@ -1600,7 +1612,7 @@ static int smu_resume(void *handle)
return ret; return ret;
} }
smu_set_gfx_cgpg(&adev->smu, true); smu_set_gfx_cgpg(smu, true);
smu->disable_uclk_switch = 0; smu->disable_uclk_switch = 0;
...@@ -2132,6 +2144,7 @@ const struct amd_ip_funcs smu_ip_funcs = { ...@@ -2132,6 +2144,7 @@ const struct amd_ip_funcs smu_ip_funcs = {
.sw_fini = smu_sw_fini, .sw_fini = smu_sw_fini,
.hw_init = smu_hw_init, .hw_init = smu_hw_init,
.hw_fini = smu_hw_fini, .hw_fini = smu_hw_fini,
.late_fini = smu_late_fini,
.suspend = smu_suspend, .suspend = smu_suspend,
.resume = smu_resume, .resume = smu_resume,
.is_idle = NULL, .is_idle = NULL,
...@@ -3196,7 +3209,7 @@ int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) ...@@ -3196,7 +3209,7 @@ int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{ {
struct amdgpu_device *adev = filp->f_inode->i_private; struct amdgpu_device *adev = filp->f_inode->i_private;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
unsigned char *buf; unsigned char *buf;
int r; int r;
...@@ -3221,7 +3234,7 @@ static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t ...@@ -3221,7 +3234,7 @@ static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t
loff_t *pos) loff_t *pos)
{ {
struct amdgpu_device *adev = filp->f_inode->i_private; struct amdgpu_device *adev = filp->f_inode->i_private;
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (!filp->private_data) if (!filp->private_data)
...@@ -3262,7 +3275,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) ...@@ -3262,7 +3275,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu->stb_context.stb_buf_size) if (!smu->stb_context.stb_buf_size)
return; return;
...@@ -3274,7 +3287,6 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) ...@@ -3274,7 +3287,6 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
&smu_stb_debugfs_fops, &smu_stb_debugfs_fops,
smu->stb_context.stb_buf_size); smu->stb_context.stb_buf_size);
#endif #endif
} }
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
......
...@@ -2072,7 +2072,8 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -2072,7 +2072,8 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs) struct i2c_msg *msg, int num_msgs)
{ {
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
struct smu_table_context *smu_table = &adev->smu.smu_table; struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table; struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c; int i, j, r, c;
...@@ -2118,9 +2119,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -2118,9 +2119,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
} }
} }
} }
mutex_lock(&adev->smu.mutex); mutex_lock(&smu->mutex);
r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
mutex_unlock(&adev->smu.mutex); mutex_unlock(&smu->mutex);
if (r) if (r)
goto fail; goto fail;
......
...@@ -2779,7 +2779,8 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -2779,7 +2779,8 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs) struct i2c_msg *msg, int num_msgs)
{ {
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
struct smu_table_context *smu_table = &adev->smu.smu_table; struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table; struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c; int i, j, r, c;
...@@ -2825,9 +2826,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -2825,9 +2826,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
} }
} }
} }
mutex_lock(&adev->smu.mutex); mutex_lock(&smu->mutex);
r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
mutex_unlock(&adev->smu.mutex); mutex_unlock(&smu->mutex);
if (r) if (r)
goto fail; goto fail;
......
...@@ -3459,7 +3459,8 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -3459,7 +3459,8 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs) struct i2c_msg *msg, int num_msgs)
{ {
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
struct smu_table_context *smu_table = &adev->smu.smu_table; struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table; struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c; int i, j, r, c;
...@@ -3505,9 +3506,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -3505,9 +3506,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
} }
} }
} }
mutex_lock(&adev->smu.mutex); mutex_lock(&smu->mutex);
r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
mutex_unlock(&adev->smu.mutex); mutex_unlock(&smu->mutex);
if (r) if (r)
goto fail; goto fail;
......
...@@ -1372,7 +1372,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev, ...@@ -1372,7 +1372,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
unsigned tyep, unsigned tyep,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high; uint32_t low, high;
uint32_t val = 0; uint32_t val = 0;
...@@ -1441,7 +1441,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, ...@@ -1441,7 +1441,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id; uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id; uint32_t src_id = entry->src_id;
/* /*
......
...@@ -1475,7 +1475,8 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -1475,7 +1475,8 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs) struct i2c_msg *msg, int num_msgs)
{ {
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
struct smu_table_context *smu_table = &adev->smu.smu_table; struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table; struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c; int i, j, r, c;
...@@ -1521,9 +1522,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, ...@@ -1521,9 +1522,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
} }
} }
} }
mutex_lock(&adev->smu.mutex); mutex_lock(&smu->mutex);
r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
mutex_unlock(&adev->smu.mutex); mutex_unlock(&smu->mutex);
if (r) if (r)
goto fail; goto fail;
......
...@@ -1200,7 +1200,7 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, ...@@ -1200,7 +1200,7 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
unsigned tyep, unsigned tyep,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high; uint32_t low, high;
uint32_t val = 0; uint32_t val = 0;
...@@ -1275,7 +1275,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, ...@@ -1275,7 +1275,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id; uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id; uint32_t src_id = entry->src_id;
/* /*
...@@ -1321,11 +1321,11 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, ...@@ -1321,11 +1321,11 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) { switch (ctxid) {
case 0x3: case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n"); dev_dbg(adev->dev, "Switched to AC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(&adev->smu); smu_v13_0_ack_ac_dc_interrupt(smu);
break; break;
case 0x4: case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n"); dev_dbg(adev->dev, "Switched to DC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(&adev->smu); smu_v13_0_ack_ac_dc_interrupt(smu);
break; break;
case 0x7: case 0x7:
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment