Commit 8bb575a2 authored by Rex Zhu's avatar Rex Zhu Committed by Alex Deucher

drm/amd/pp: Save vf state in pp context

Store vf state in pp_context so we can
deprecate the cgs interface.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b61e54cb
...@@ -46,7 +46,8 @@ static int amd_powerplay_create(struct amdgpu_device *adev) ...@@ -46,7 +46,8 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
return -ENOMEM; return -ENOMEM;
hwmgr->adev = adev; hwmgr->adev = adev;
hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
hwmgr->device = amdgpu_cgs_create_device(adev); hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock); mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family; hwmgr->chip_family = adev->family;
......
...@@ -718,6 +718,7 @@ struct pp_hwmgr { ...@@ -718,6 +718,7 @@ struct pp_hwmgr {
uint32_t chip_family; uint32_t chip_family;
uint32_t chip_id; uint32_t chip_id;
uint32_t smu_version; uint32_t smu_version;
bool not_vf;
bool pm_en; bool pm_en;
struct mutex smu_lock; struct mutex smu_lock;
......
...@@ -288,8 +288,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) ...@@ -288,8 +288,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */ /* Only start SMC if SMC RAM is not running */
if (!(smu7_is_smc_ram_running(hwmgr) if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
|| cgs_is_virtualization_enabled(hwmgr->device))) {
/* Check if SMU is running in protected mode */ /* Check if SMU is running in protected mode */
if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, CGS_IND_REG__SMC,
...@@ -335,8 +334,8 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) ...@@ -335,8 +334,8 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
uint32_t efuse = 0; uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
if (cgs_is_virtualization_enabled(hwmgr->device)) if (!hwmgr->not_vf)
return 0; return false;
if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) { mask, &efuse)) {
......
...@@ -295,8 +295,7 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr) ...@@ -295,8 +295,7 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */ /* Only start SMC if SMC RAM is not running */
if (!(smu7_is_smc_ram_running(hwmgr) if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
|| cgs_is_virtualization_enabled(hwmgr->device))) {
smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
......
...@@ -375,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, ...@@ -375,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
entry->meta_data_addr_low = 0; entry->meta_data_addr_low = 0;
/* digest need be excluded out */ /* digest need be excluded out */
if (cgs_is_virtualization_enabled(hwmgr->device)) if (!hwmgr->not_vf)
info.image_size -= 20; info.image_size -= 20;
entry->data_size_byte = info.image_size; entry->data_size_byte = info.image_size;
entry->num_register_entries = 0; entry->num_register_entries = 0;
...@@ -409,7 +409,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) ...@@ -409,7 +409,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
0x0); 0x0);
if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
if (!cgs_is_virtualization_enabled(hwmgr->device)) { if (hwmgr->not_vf) {
smu7_send_msg_to_smc_with_parameter(hwmgr, smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI, PPSMC_MSG_SMU_DRAM_ADDR_HI,
upper_32_bits(smu_data->smu_buffer.mc_addr)); upper_32_bits(smu_data->smu_buffer.mc_addr));
...@@ -467,7 +467,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) ...@@ -467,7 +467,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL); "Failed to Get Firmware Entry.", return -EINVAL);
if (cgs_is_virtualization_enabled(hwmgr->device)) if (!hwmgr->not_vf)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL); "Failed to Get Firmware Entry.", return -EINVAL);
...@@ -608,7 +608,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) ...@@ -608,7 +608,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
smu_data->header = smu_data->header_buffer.kaddr; smu_data->header = smu_data->header_buffer.kaddr;
smu_data->header_buffer.mc_addr = mc_addr; smu_data->header_buffer.mc_addr = mc_addr;
if (cgs_is_virtualization_enabled(hwmgr->device)) if (!hwmgr->not_vf)
return 0; return 0;
smu_data->smu_buffer.data_size = 200*4096; smu_data->smu_buffer.data_size = 200*4096;
...@@ -643,7 +643,7 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr) ...@@ -643,7 +643,7 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
&smu_data->header_buffer.mc_addr, &smu_data->header_buffer.mc_addr,
&smu_data->header_buffer.kaddr); &smu_data->header_buffer.kaddr);
if (!cgs_is_virtualization_enabled(hwmgr->device)) if (hwmgr->not_vf)
amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle, amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
&smu_data->smu_buffer.mc_addr, &smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr); &smu_data->smu_buffer.kaddr);
......
...@@ -199,8 +199,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr) ...@@ -199,8 +199,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
int result; int result;
/* Only start SMC if SMC RAM is not running */ /* Only start SMC if SMC RAM is not running */
if (!(smu7_is_smc_ram_running(hwmgr) || if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
cgs_is_virtualization_enabled(hwmgr->device))) {
/*Check if SMU is running in protected mode*/ /*Check if SMU is running in protected mode*/
if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) { SMU_FIRMWARE, SMU_MODE)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment