Commit c1dd5d29 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.0-2022-08-31' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.0-2022-08-31:

amdgpu:
- FRU error message fix
- MES 11 updates
- DCN 3.2.x fixes
- DCN 3.1.4 fixes
- Fix possible use after free in CS IOCTL
- SMU 13.0.x fixes
- Fix iolink reporting on devices with direct connections to CPU
- GFX10 tap delay firmware fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220831212312.5921-1-alexander.deucher@amd.com
parents a71f3950 39c84b8e
@@ -5524,7 +5524,8 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
 	resource_size_t aper_limit =
 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
-	bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
+	bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
+			  !(pci_p2pdma_distance_many(adev->pdev,
 					&peer_adev->dev, 1, true) < 0);

 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
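Note on the check: pci_p2pdma_distance_many() returns a negative value when the PCIe topology cannot route peer-to-peer DMA between the endpoints, and XGMI devices wired directly to the CPU have no PCIe peer path worth probing. A minimal sketch of the combined predicate under those assumptions (the helper name is hypothetical, not from this commit):

	/* hypothetical helper illustrating the fixed predicate */
	static bool peer_p2p_possible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev)
	{
		/* direct CPU attachment: no PCIe p2p path to probe */
		if (adev->gmc.xgmi.connected_to_cpu)
			return false;

		/* negative distance: p2p DMA is not routable */
		return pci_p2pdma_distance_many(adev->pdev, &peer_adev->dev,
						1, true) >= 0;
	}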
@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 		return true;
 	case CHIP_SIENNA_CICHLID:
 		if (strnstr(atom_ctx->vbios_version, "D603",
-			    sizeof(atom_ctx->vbios_version)))
-			return true;
-		else
-			return false;
+			    sizeof(atom_ctx->vbios_version))) {
+			if (strnstr(atom_ctx->vbios_version, "D603GLXE",
+				    sizeof(atom_ctx->vbios_version)))
+				return false;
+			else
+				return true;
+		} else {
+			return false;
+		}
 	default:
 		return false;
 	}
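Net effect of the FRU change: previously every Sienna Cichlid vbios containing "D603" claimed FRU EEPROM support; the nested strnstr() now carves out the "D603GLXE" variant, presumably the SKU behind the spurious FRU error message this pull advertises as fixed.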
@@ -159,7 +159,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
-	dma_fence_put(&job->hw_fence);
+	if (!job->hw_fence.ops)
+		kfree(job);
+	else
+		dma_fence_put(&job->hw_fence);
 }

 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
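Context for the CS IOCTL use-after-free fix: the hardware fence is embedded in struct amdgpu_job, so once it has been initialised (hw_fence.ops set) other paths may still hold references to it, and the job's memory must be reclaimed through the fence refcount via dma_fence_put() rather than freed directly. kfree() remains correct only when the embedded fence was never initialised and the job can have no other owner.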
@@ -2401,7 +2401,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
 static bool fw_load_skip_check(struct psp_context *psp,
 			       struct amdgpu_firmware_info *ucode)
 {
-	if (!ucode->fw)
+	if (!ucode->fw || !ucode->ucode_size)
 		return true;

 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
@@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 	}

-	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
-	info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
-	info->fw = adev->gfx.rlc_fw;
-	adev->firmware.fw_size +=
-		ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+		info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	}

-	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
-	info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
-	info->fw = adev->gfx.rlc_fw;
-	adev->firmware.fw_size +=
-		ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+		info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	}

-	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
-	info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
-	info->fw = adev->gfx.rlc_fw;
-	adev->firmware.fw_size +=
-		ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+		info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	}

-	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
-	info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
-	info->fw = adev->gfx.rlc_fw;
-	adev->firmware.fw_size +=
-		ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+		info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	}

-	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
-	info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
-	info->fw = adev->gfx.rlc_fw;
-	adev->firmware.fw_size +=
-		ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+		info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+		info->fw = adev->gfx.rlc_fw;
+		adev->firmware.fw_size +=
+			ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+	}

 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
 	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
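Each block now registers its tap-delay microcode only when the RLC firmware actually carries that section, so older blobs neither inflate fw_size nor leave half-initialised ucode entries. The five-fold repetition also invites a helper; a possible consolidation, sketched here for illustration and not part of the commit:

	/* hypothetical helper: register one tap-delay ucode entry iff the
	 * RLC firmware carries that section
	 */
	static void gfx_v10_0_add_tap_delay_ucode(struct amdgpu_device *adev,
						  enum AMDGPU_UCODE_ID id,
						  uint32_t size_bytes)
	{
		struct amdgpu_firmware_info *info;

		if (!size_bytes)
			return;

		info = &adev->firmware.ucode[id];
		info->ucode_id = id;
		info->fw = adev->gfx.rlc_fw;
		adev->firmware.fw_size += ALIGN(size_bytes, PAGE_SIZE);
	}

Each of the five blocks above would then collapse to a single call with the matching AMDGPU_UCODE_ID_*_TAP_DELAYS id and size field.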
@@ -183,6 +183,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
 	mes_add_queue_pkt.tma_addr = input->tma_addr;
 	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+	mes_add_queue_pkt.trap_en = 1;

 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
 			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
@@ -1094,7 +1094,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 			dc->current_state->stream_count != context->stream_count)
 			should_disable = true;

-		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
 			struct pipe_ctx *old_pipe, *new_pipe;

 			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
 {
 	struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;

+	if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+		return true;
+
 	/* Supports development firmware and firmware >= 4.0.11 */
 	return dc_dmub_srv &&
 		!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
@@ -317,6 +317,7 @@ static void enc314_stream_encoder_dp_unblank(
 	/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
 	 * that it overflows during mode transition, and sometimes doesn't recover.
 	 */
+	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
 	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
 	udelay(10);
@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
 	REG_UPDATE(OPTC_WIDTH_CONTROL,
 			OPTC_SEGMENT_WIDTH, mpcc_hactive);

-	REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+	REG_UPDATE(OTG_H_TIMING_CNTL,
+			OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
 	optc1->opp_count = opp_cnt;
 }
@@ -454,6 +454,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
 	hpo_dp_stream_encoder_reg_list(0),
 	hpo_dp_stream_encoder_reg_list(1),
 	hpo_dp_stream_encoder_reg_list(2),
+	hpo_dp_stream_encoder_reg_list(3)
 };

 static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
@@ -225,19 +225,19 @@ void dccg32_set_dpstreamclk(
 	case 0:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL,
			     DPSTREAMCLK0_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
 		break;
 	case 1:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
 		break;
 	case 2:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
 		break;
 	case 3:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
 		break;
 	default:
 		BREAK_TO_DEBUGGER();
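Before this change each DPSTREAMCLK instance's SRC_SEL was hard-coded to its own index (clock 0 fed from OTG 0, clock 1 from OTG 1, and so on), which only holds when pipes map one-to-one onto OTGs; selecting otg_inst routes each stream clock from whichever OTG actually drives it.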
@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
 	// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
 	REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);

+	/* read start level = 0 will bring underflow / overflow and DIG_FIFO_ERROR = 1
+	 * so set it to 1/2 full = 7 before reset as suggested by hardware team.
+	 */
+	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
 	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
 	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
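Per the in-code comment, a read start level of 0 lets the DIG FIFO underflow or overflow across the reset and latch DIG_FIFO_ERROR, so the read pointer is parked at half full (0x7) before DIG_FIFO_RESET is asserted. The DCN 3.1.4 hunk earlier applies the same 0x7 start level ahead of its DP_STEER_FIFO reset.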
@@ -295,24 +295,38 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
 		}

 		// Include cursor size for CAB allocation
-		if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
-			cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
-			switch (stream->cursor_attributes.color_format) {
-			case CURSOR_MODE_MONO:
-				cursor_size /= 2;
-				break;
-			case CURSOR_MODE_COLOR_1BIT_AND:
-			case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
-			case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
-				cursor_size *= 4;
-				break;
-			case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
-			case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
-				cursor_size *= 8;
-				break;
-			}
-			cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
+		for (j = 0; j < dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+			struct hubp *hubp = pipe->plane_res.hubp;
+
+			if (pipe->stream && pipe->plane_state && hubp)
+				/* Find the cursor plane and use the exact size instead of
+				 * using the max for calculation
+				 */
+				if (hubp->curs_attr.width > 0) {
+					cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
+					break;
+				}
+		}
+
+		switch (stream->cursor_attributes.color_format) {
+		case CURSOR_MODE_MONO:
+			cursor_size /= 2;
+			break;
+		case CURSOR_MODE_COLOR_1BIT_AND:
+		case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+		case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+			cursor_size *= 4;
+			break;
+		case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+		case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+			cursor_size *= 8;
+			break;
+		}
+
+		if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
+			cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
 					plane->address.grph.cursor_cache_addr.quad_part);
 		}
 	}
@@ -325,6 +339,26 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
 	if (cache_lines_used % lines_per_way > 0)
 		num_ways++;

+	for (i = 0; i < ctx->stream_count; i++) {
+		stream = ctx->streams[i];
+		for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+			plane = ctx->stream_status[i].plane_states[j];
+
+			if (stream->cursor_position.enable && plane &&
+				!plane->address.grph.cursor_cache_addr.quad_part &&
+				cursor_size > 16384) {
+				/* Cursor caching is not supported since it won't be on the same line.
+				 * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
+				 * this case triggers corruption. If we're at the edge, then dont trigger display refresh
+				 * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp.
+				 */
+				num_ways++;
+				/* We only expect one cursor plane */
+				break;
+			}
+		}
+	}
+
 	return num_ways;
 }
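The 16384-byte threshold in the second hunk is exactly the boundary the comment describes: a 64 x 64 pixel cursor at 4 bytes per pixel is 64 * 64 * 4 = 16384 bytes. The first hunk feeds that check by replacing the max_cursor_size * max_cursor_size worst case with the exact width * height reported by the cursor's HUBP, so only genuinely large cursors that cannot be cached (no cursor_cache_addr) cost the extra MALL way.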
@@ -144,7 +144,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

 		if (!pipe->stream)
-			return false;
+			continue;

 		if (!pipe->plane_state)
 			return false;
@@ -1014,6 +1014,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 			dc->debug.force_subvp_mclk_switch)) {

 		dcn32_merge_pipes_for_subvp(dc, context);
+		// to re-initialize viewport after the pipe merge
+		for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+				continue;
+
+			resource_build_scaling_params(pipe_ctx);
+		}

 		while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
 			dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	dto_params.timing = &pipe_ctx->stream->timing;
 	dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);

-	dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+	dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
 	dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
 	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 	stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	stream_enc->funcs->disable(stream_enc);
 	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 	dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
-	dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+	dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
 }

 static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
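Both call sites previously programmed the DPSTREAMCLK instance belonging to the link encoder; DP stream clocks track stream encoders (as the adjacent symclk32_se calls already do), so passing stream_enc->inst keeps the enable (DTBCLK0) and disable (REFCLK) paths pointed at the same clock instance.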
@@ -268,7 +268,8 @@ union MESAPI__ADD_QUEUE {
 			uint32_t is_tmz_queue : 1;
 			uint32_t map_kiq_utility_queue : 1;
 			uint32_t is_kfd_process : 1;
-			uint32_t reserved : 22;
+			uint32_t trap_en : 1;
+			uint32_t reserved : 21;
 		};
 		struct MES_API_STATUS api_status;
 		uint64_t tma_addr;
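The new trap_en flag is carved out of the adjacent reserved field (22 bits down to 21), so the bitfield word and the overall size and layout of union MESAPI__ADD_QUEUE are unchanged; the mes_v11_0_add_hw_queue() hunk earlier sets the flag unconditionally when a queue is added.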
@@ -25,7 +25,7 @@
 #define SMU13_DRIVER_IF_V13_0_0_H

 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x22
+#define PPTABLE_VERSION 0x24

 #define NUM_GFXCLK_DPM_LEVELS 16
 #define NUM_SOCCLK_DPM_LEVELS 8
@@ -30,7 +30,7 @@
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C

 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -291,5 +291,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
 void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);

 int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+					void **table,
+					uint32_t *size,
+					uint32_t pptable_id);
+
 #endif
 #endif
@@ -84,9 +84,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
 static const int link_speed[] = {25, 50, 80, 160};

-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
-					       uint32_t pptable_id);
-
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
@@ -224,23 +221,19 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
 	/*
 	 * Temporary solution for SMU V13.0.0 with SCPM enabled:
-	 * - use 36831 signed pptable when pp_table_id is 3683
-	 * - use 37151 signed pptable when pp_table_id is 3715
-	 * - use 36641 signed pptable when pp_table_id is 3664 or 0
-	 * TODO: drop these when the pptable carried in vbios is ready.
+	 * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+	 * - use 36831 soft pptable when pptable_id is 3683
 	 */
 	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
 		switch (pptable_id) {
 		case 0:
 		case 3664:
-			pptable_id = 36641;
+		case 3715:
+		case 3795:
+			pptable_id = 0;
 			break;
 		case 3683:
 			pptable_id = 36831;
 			break;
-		case 3715:
-			pptable_id = 37151;
-			break;
 		default:
 			dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
 			return -EINVAL;
@@ -425,8 +418,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
 	return 0;
 }

-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
-					       uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+					void **table,
+					uint32_t *size,
+					uint32_t pptable_id)
 {
 	const struct smc_firmware_header_v1_0 *hdr;
 	struct amdgpu_device *adev = smu->adev;
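smu_v13_0_get_pptable_from_firmware() loses both its static qualifier and the forward declaration because the SMU 13.0.0 setup path below now calls it across files; the matching prototype is the one added to smu_v13_0.h in the previous hunk.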
@@ -388,11 +388,29 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
 	return 0;
 }

+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+					     void **table,
+					     uint32_t *size)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	void *combo_pptable = smu_table->combo_pptable;
+	int ret = 0;
+
+	ret = smu_cmn_get_combo_pptable(smu);
+	if (ret)
+		return ret;
+
+	*table = combo_pptable;
+	*size = sizeof(struct smu_13_0_0_powerplay_table);
+
+	return 0;
+}
+
 static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	void *combo_pptable = smu_table->combo_pptable;
 	struct amdgpu_device *adev = smu->adev;
+	uint32_t pptable_id;
 	int ret = 0;

 	/*
@@ -401,17 +419,51 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
 	 * rely on the combo pptable(and its revelant SMU message).
 	 */
 	if (adev->scpm_enabled) {
-		ret = smu_cmn_get_combo_pptable(smu);
-		if (ret)
-			return ret;
-
-		smu->smu_table.power_play_table = combo_pptable;
-		smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
+		ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+							&smu_table->power_play_table,
+							&smu_table->power_play_table_size);
 	} else {
-		ret = smu_v13_0_setup_pptable(smu);
-		if (ret)
-			return ret;
+		/* override pptable_id from driver parameter */
+		if (amdgpu_smu_pptable_id >= 0) {
+			pptable_id = amdgpu_smu_pptable_id;
+			dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+		} else {
+			pptable_id = smu_table->boot_values.pp_table_id;
+		}
+
+		/*
+		 * Temporary solution for SMU V13.0.0 with SCPM disabled:
+		 * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+		 * - use soft pptable when pptable_id is 3683
+		 */
+		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+			switch (pptable_id) {
+			case 3664:
+			case 3715:
+			case 3795:
+				pptable_id = 0;
+				break;
+			case 3683:
+				break;
+			default:
+				dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+				return -EINVAL;
+			}
+		}
+
+		/* force using vbios pptable in sriov mode */
+		if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+			ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+								&smu_table->power_play_table,
+								&smu_table->power_play_table_size);
+		else
+			ret = smu_v13_0_get_pptable_from_firmware(smu,
+								  &smu_table->power_play_table,
+								  &smu_table->power_play_table_size,
+								  pptable_id);
 	}
+
+	if (ret)
+		return ret;

 	ret = smu_v13_0_0_store_powerplay_table(smu);
 	if (ret)
@@ -400,11 +400,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
 	return 0;
 }

+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+					     void **table,
+					     uint32_t *size)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	void *combo_pptable = smu_table->combo_pptable;
+	int ret = 0;
+
+	ret = smu_cmn_get_combo_pptable(smu);
+	if (ret)
+		return ret;
+
+	*table = combo_pptable;
+	*size = sizeof(struct smu_13_0_7_powerplay_table);
+
+	return 0;
+}
+
 static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	void *combo_pptable = smu_table->combo_pptable;
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;

@@ -413,18 +429,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
 	 * be used directly by driver. To get the raw pptable, we need to
 	 * rely on the combo pptable(and its revelant SMU message).
 	 */
-	if (adev->scpm_enabled) {
-		ret = smu_cmn_get_combo_pptable(smu);
-		if (ret)
-			return ret;
-
-		smu->smu_table.power_play_table = combo_pptable;
-		smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
-	} else {
-		ret = smu_v13_0_setup_pptable(smu);
-		if (ret)
-			return ret;
-	}
+	ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+						&smu_table->power_play_table,
+						&smu_table->power_play_table_size);
+	if (ret)
+		return ret;

 	ret = smu_v13_0_7_store_powerplay_table(smu);
 	if (ret)