Commit 2eb7d4b9 authored by Dillon Varone, committed by Alex Deucher

drm/amd/display: Refactor dcn401_update_clocks

[WHY & HOW]
Refactor complex code into manageable functions. This also cleans up
some of the update logic.
Reviewed-by: Alvin Lee <alvin.lee2@amd.com>
Acked-by: Alex Hung <alex.hung@amd.com>
Signed-off-by: Dillon Varone <dillon.varone@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 61f88003
......@@ -36,7 +36,13 @@
/* DALSMC message IDs for the DCN4.01 DC <-> PMFW mailbox interface. */
#define DALSMC_MSG_SetFclkSwitchAllow 0x11
#define DALSMC_MSG_SetCabForUclkPstate 0x12
#define DALSMC_MSG_SetWorstCaseUclkLatency 0x13
#define DALSMC_MSG_DcnExitReset 0x14
#define DALSMC_MSG_ReturnHardMinStatus 0x15
#define DALSMC_MSG_SetAlwaysWaitDmcubResp 0x16
#define DALSMC_MSG_IndicateDrrStatus 0x17 // PMFW 15811
#define DALSMC_MSG_ActiveUclkFclk 0x18
#define DALSMC_MSG_IdleUclkFclk 0x19
/* Must stay one past the highest message ID above. */
#define DALSMC_Message_Count 0x1A
typedef enum {
FCLK_SWITCH_DISALLOW,
......
......@@ -67,6 +67,9 @@ static const struct clk_mgr_mask clk_mgr_mask_dcn401 = {
CLK_COMMON_MASK_SH_LIST_DCN401(_MASK)
};
#define TO_DCN401_CLK_MGR(clk_mgr)\
container_of(clk_mgr, struct dcn401_clk_mgr, base)
static bool dcn401_is_ppclk_dpm_enabled(struct clk_mgr_internal *clk_mgr, PPCLK_e clk)
{
bool ppclk_dpm_enabled = false;
......@@ -112,6 +115,30 @@ static bool dcn401_is_ppclk_dpm_enabled(struct clk_mgr_internal *clk_mgr, PPCLK_
return ppclk_dpm_enabled;
}
/*
 * Report whether PMFW supports separate idle DPM hardmins for the given
 * clock. Only UCLK and FCLK can have idle DPM, and only on GC 12.0.x
 * parts running a sufficiently new SMU firmware.
 */
static bool dcn401_is_ppclk_idle_dpm_enabled(struct clk_mgr_internal *clk_mgr, PPCLK_e clk)
{
	uint32_t hw_rev = clk_mgr->base.ctx->asic_id.hw_internal_rev;
	bool enabled = false;

	switch (clk) {
	case PPCLK_UCLK:
	case PPCLK_FCLK:
		/* Minimum SMU version differs per ASIC revision. */
		enabled = (ASICREV_IS_GC_12_0_0_A0(hw_rev) && clk_mgr->smu_ver >= 0x681800) ||
			  (ASICREV_IS_GC_12_0_1_A0(hw_rev) && clk_mgr->smu_ver >= 0x661300);
		break;
	default:
		break;
	}

	/* Never report support when no SMU is present at all. */
	return enabled && clk_mgr->smu_present;
}
/* Query SMU for all clock states for a particular clock */
static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0,
unsigned int *num_levels)
......@@ -470,7 +497,7 @@ static void dcn401_update_clocks_update_dentist(
}
static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
static void dcn401_update_clocks_legacy(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
......@@ -512,7 +539,7 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr->smu_present) {
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
dcn401_smu_set_num_of_displays(clk_mgr, display_count);
clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
......@@ -542,7 +569,7 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
dcn401_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
}
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
......@@ -668,6 +695,496 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}
static void dcn401_build_update_clocks_sequence(
struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower,
unsigned int *num_steps)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence;
bool force_reset = false;
bool enter_display_off = false;
bool update_active_fclk = false;
bool update_active_uclk = false;
bool update_idle_fclk = false;
bool update_idle_uclk = false;
bool update_dispclk = false;
bool update_dppclk = false;
bool dppclk_lowered = false;
bool is_idle_dpm_enabled = dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_FCLK);
int total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
int active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz);
int active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz);
int idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz);
int idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz);
int display_count;
bool fclk_p_state_change_support, uclk_p_state_change_support;
*num_steps = 0;
/* CLK_MGR401_READ_CLOCKS_FROM_DENTIST */
if (clk_mgr_base->clks.dispclk_khz == 0 ||
(dc->debug.force_clock_mode & 0x1)) {
/* This is from resume or boot up, if forced_clock cfg option used,
* we bypass program dispclk and DPPCLK, but need set them for S3.
* Force_clock_mode 0x1: force reset the clock even it is the same clock
* as long as it is in Passive level.
*/
force_reset = true;
block_sequence[*num_steps].func = CLK_MGR401_READ_CLOCKS_FROM_DENTIST;
(*num_steps)++;
}
/* CLK_MGR401_UPDATE_NUM_DISPLAYS */
if (clk_mgr_internal->smu_present) {
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
block_sequence[*num_steps].params.update_num_displays_params.num_displays = display_count;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_NUM_DISPLAYS;
(*num_steps)++;
}
}
/* CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT */
clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
update_active_fclk = true;
update_idle_fclk = true;
/* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
if (clk_mgr_base->clks.fclk_p_state_change_support) {
/* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
block_sequence[*num_steps].params.update_fclk_pstate_support_params.support = FCLK_PSTATE_SUPPORTED;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
(*num_steps)++;
}
} else {
/* P-State is not supported so force max clocks */
idle_fclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1].fclk_mhz;
active_fclk_mhz = idle_fclk_mhz;
}
}
/* UPDATE DCFCLK */
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DCFCLK)) {
block_sequence[*num_steps].params.update_hardmin_params.ppclk = PPCLK_DCFCLK;
block_sequence[*num_steps].params.update_hardmin_params.freq_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz);
block_sequence[*num_steps].params.update_hardmin_params.response = NULL;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK;
(*num_steps)++;
}
}
/* CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK */
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DCFCLK)) {
block_sequence[*num_steps].params.update_deep_sleep_dcfclk_params.freq_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz);
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK;
(*num_steps)++;
}
}
/* SOCCLK */
if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
/* We don't actually care about socclk, don't notify SMU of hard min */
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
/* CLK_MGR401_UPDATE_CAB_FOR_UCLK */
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways < new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) {
block_sequence[*num_steps].params.update_cab_for_uclk_params.num_ways = clk_mgr_base->clks.num_ways;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_CAB_FOR_UCLK;
(*num_steps)++;
}
}
/* UCLK */
uclk_p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, uclk_p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = uclk_p_state_change_support;
update_active_uclk = true;
update_idle_uclk = true;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support) {
if (dc->clk_mgr->dc_mode_softmax_enabled) {
/* will never have the functional UCLK min above the softmax
* since we calculate mode support based on softmax being the max UCLK
* frequency.
*/
active_uclk_mhz = clk_mgr_base->bw_params->dc_mode_softmax_memclk;
} else {
active_uclk_mhz = clk_mgr_base->bw_params->max_memclk_mhz;
}
idle_uclk_mhz = active_uclk_mhz;
}
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
if (clk_mgr_base->clks.p_state_change_support) {
update_active_uclk = true;
active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz);
}
}
if (should_set_clock(safe_to_lower, new_clocks->idle_dramclk_khz, clk_mgr_base->clks.idle_dramclk_khz)) {
clk_mgr_base->clks.idle_dramclk_khz = new_clocks->idle_dramclk_khz;
if (clk_mgr_base->clks.p_state_change_support) {
update_idle_uclk = true;
idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz);
}
}
/* set UCLK to requested value */
if ((update_active_uclk || update_idle_uclk) &&
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
!is_idle_dpm_enabled) {
block_sequence[*num_steps].params.update_hardmin_params.ppclk = PPCLK_UCLK;
block_sequence[*num_steps].params.update_hardmin_params.freq_mhz = active_uclk_mhz;
block_sequence[*num_steps].params.update_hardmin_params.response = NULL;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK;
(*num_steps)++;
}
/* FCLK */
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
if (clk_mgr_base->clks.fclk_p_state_change_support) {
update_active_fclk = true;
active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz);
}
}
if (should_set_clock(safe_to_lower, new_clocks->idle_fclk_khz, clk_mgr_base->clks.idle_fclk_khz)) {
clk_mgr_base->clks.idle_fclk_khz = new_clocks->idle_fclk_khz;
if (clk_mgr_base->clks.fclk_p_state_change_support) {
update_idle_fclk = true;
idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz);
}
}
/* When idle DPM is enabled, need to send active and idle hardmins separately */
/* CLK_MGR401_UPDATE_ACTIVE_HARDMINS */
if ((update_active_uclk || update_active_fclk) && is_idle_dpm_enabled) {
block_sequence[*num_steps].params.update_idle_hardmin_params.uclk_mhz = active_uclk_mhz;
block_sequence[*num_steps].params.update_idle_hardmin_params.fclk_mhz = active_fclk_mhz;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_ACTIVE_HARDMINS;
(*num_steps)++;
}
/* CLK_MGR401_UPDATE_IDLE_HARDMINS */
if ((update_idle_uclk || update_idle_uclk) && is_idle_dpm_enabled) {
block_sequence[*num_steps].params.update_idle_hardmin_params.uclk_mhz = idle_uclk_mhz;
block_sequence[*num_steps].params.update_idle_hardmin_params.fclk_mhz = idle_fclk_mhz;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_IDLE_HARDMINS;
(*num_steps)++;
}
/* CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK, CLK_MGR401_INDICATE_DRR_STATUS*/
if (clk_mgr_base->clks.fw_based_mclk_switching != new_clocks->fw_based_mclk_switching) {
clk_mgr_base->clks.fw_based_mclk_switching = new_clocks->fw_based_mclk_switching;
block_sequence[*num_steps].params.update_wait_for_dmub_ack_params.enable = clk_mgr_base->clks.fw_based_mclk_switching;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK;
(*num_steps)++;
block_sequence[*num_steps].params.indicate_drr_status_params.mod_drr_for_pstate = clk_mgr_base->clks.fw_based_mclk_switching;
block_sequence[*num_steps].func = CLK_MGR401_INDICATE_DRR_STATUS;
(*num_steps)++;
}
/* set FCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if ((update_active_fclk || update_idle_fclk)) {
/* disable FCLK P-State support if needed */
if (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support &&
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
block_sequence[*num_steps].params.update_fclk_pstate_support_params.support = FCLK_PSTATE_NOTSUPPORTED;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
(*num_steps)++;
}
/* No need to send active FCLK hardmin, automatically set based on DCFCLK */
// block_sequence[*num_steps].update_hardmin_params.clk_mgr = clk_mgr;
// block_sequence[*num_steps].update_hardmin_params.ppclk = PPCLK_FCLK;
// block_sequence[*num_steps].update_hardmin_params.freq_mhz = active_fclk_mhz;
// block_sequence[*num_steps].update_hardmin_params.response = NULL;
// block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK;
// (*num_steps)++;
}
/* CLK_MGR401_UPDATE_CAB_FOR_UCLK */
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) {
block_sequence[*num_steps].params.update_cab_for_uclk_params.num_ways = clk_mgr_base->clks.num_ways;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_CAB_FOR_UCLK;
(*num_steps)++;
}
}
/* DTBCLK */
if (!new_clocks->dtbclk_en && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DTBCLK))
new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
if (!dc->debug.disable_dtb_ref_clk_switch &&
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000) && //TODO these should be ceiled
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DTBCLK)) {
/* DCCG requires KHz precision for DTBCLK */
block_sequence[*num_steps].params.update_hardmin_params.ppclk = PPCLK_DTBCLK;
block_sequence[*num_steps].params.update_hardmin_params.freq_mhz = khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz);
block_sequence[*num_steps].params.update_hardmin_params.response = &clk_mgr_base->clks.ref_dtbclk_khz;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK;
(*num_steps)++;
/* Update DTO in DCCG */
block_sequence[*num_steps].params.update_dtbclk_dto_params.context = context;
block_sequence[*num_steps].params.update_dtbclk_dto_params.ref_dtbclk_khz = clk_mgr_base->clks.ref_dtbclk_khz;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DTBCLK_DTO;
(*num_steps)++;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dppclk_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
clk_mgr_base->clks.actual_dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
update_dispclk = true;
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dppclk_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
block_sequence[*num_steps].params.update_dppclk_dto_params.context = context;
block_sequence[*num_steps].params.update_dppclk_dto_params.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
block_sequence[*num_steps].params.update_dppclk_dto_params.safe_to_lower = safe_to_lower;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DPPCLK_DTO;
(*num_steps)++;
block_sequence[*num_steps].params.update_dentist_params.context = context;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DENTIST;
(*num_steps)++;
if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DPPCLK)) {
block_sequence[*num_steps].params.update_hardmin_optimized_params.ppclk = PPCLK_DPPCLK;
block_sequence[*num_steps].params.update_hardmin_optimized_params.freq_khz = clk_mgr_base->clks.dppclk_khz;
block_sequence[*num_steps].params.update_hardmin_optimized_params.response = &clk_mgr_base->clks.actual_dppclk_khz;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED;
(*num_steps)++;
block_sequence[*num_steps].params.update_dppclk_dto_params.context = context;
block_sequence[*num_steps].params.update_dppclk_dto_params.dppclk_khz = clk_mgr_base->clks.actual_dppclk_khz;
block_sequence[*num_steps].params.update_dppclk_dto_params.safe_to_lower = safe_to_lower;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DPPCLK_DTO;
(*num_steps)++;
}
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DPPCLK)) {
block_sequence[*num_steps].params.update_hardmin_optimized_params.ppclk = PPCLK_DPPCLK;
block_sequence[*num_steps].params.update_hardmin_optimized_params.freq_khz = clk_mgr_base->clks.dppclk_khz;
block_sequence[*num_steps].params.update_hardmin_optimized_params.response = &clk_mgr_base->clks.actual_dppclk_khz;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED;
(*num_steps)++;
}
if (update_dppclk || update_dispclk) {
block_sequence[*num_steps].params.update_dentist_params.context = context;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DENTIST;
(*num_steps)++;
}
block_sequence[*num_steps].params.update_dppclk_dto_params.context = context;
block_sequence[*num_steps].params.update_dppclk_dto_params.dppclk_khz = clk_mgr_base->clks.actual_dppclk_khz;
block_sequence[*num_steps].params.update_dppclk_dto_params.safe_to_lower = safe_to_lower;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_DPPCLK_DTO;
(*num_steps)++;
}
}
if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
block_sequence[*num_steps].params.update_psr_wait_loop_params.dmcu = dmcu;
block_sequence[*num_steps].params.update_psr_wait_loop_params.wait = clk_mgr_base->clks.dispclk_khz / 1000 / 7;
block_sequence[*num_steps].func = CLK_MGR401_UPDATE_PSR_WAIT_LOOP;
(*num_steps)++;
}
}
/*
 * dcn401_update_clocks() - top-level DCN4.01 clock programming entry point.
 *
 * Builds a block sequence describing every SMU/DCCG update implied by the
 * new clock state in @context (see dcn401_build_update_clocks_sequence),
 * then executes each step in order by dispatching on its func enum. The
 * legacy monolithic path is used when the enable_legacy_clock_update debug
 * option is set, and nothing is done while the skip_clock_update
 * workaround is active.
 */
static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,
		bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal);
	struct dc *dc = clk_mgr_base->ctx->dc;

	unsigned int num_steps = 0;

	unsigned int i;
	union dcn401_clk_mgr_block_sequence_params *params;

	if (dc->work_arounds.skip_clock_update)
		return;

	if (dc->debug.enable_legacy_clock_update) {
		dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
		return;
	}

	/* build clock update sequence */
	dcn401_build_update_clocks_sequence(clk_mgr_base,
			context,
			safe_to_lower,
			&num_steps);

	/* execute sequence: each step's func selects the union member that
	 * the builder populated for that step
	 */
	for (i = 0; i < num_steps; i++) {
		params = &clk_mgr401->block_sequence[i].params;

		switch (clk_mgr401->block_sequence[i].func) {
		case CLK_MGR401_READ_CLOCKS_FROM_DENTIST:
			dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
			break;
		case CLK_MGR401_UPDATE_NUM_DISPLAYS:
			dcn401_smu_set_num_of_displays(clk_mgr_internal,
					params->update_num_displays_params.num_displays);
			break;
		case CLK_MGR401_UPDATE_HARDMIN_PPCLK:
			/* response may be NULL when the caller doesn't need the
			 * frequency actually granted by SMU
			 */
			if (params->update_hardmin_params.response)
				*params->update_hardmin_params.response = dcn401_smu_set_hard_min_by_freq(
						clk_mgr_internal,
						params->update_hardmin_params.ppclk,
						params->update_hardmin_params.freq_mhz);
			else
				dcn401_smu_set_hard_min_by_freq(clk_mgr_internal,
						params->update_hardmin_params.ppclk,
						params->update_hardmin_params.freq_mhz);
			break;
		case CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED:
			if (params->update_hardmin_optimized_params.response)
				*params->update_hardmin_optimized_params.response = dcn401_set_hard_min_by_freq_optimized(
						clk_mgr_internal,
						params->update_hardmin_optimized_params.ppclk,
						params->update_hardmin_optimized_params.freq_khz);
			else
				dcn401_set_hard_min_by_freq_optimized(clk_mgr_internal,
						params->update_hardmin_optimized_params.ppclk,
						params->update_hardmin_optimized_params.freq_khz);
			break;
		case CLK_MGR401_UPDATE_ACTIVE_HARDMINS:
			/* active and idle hardmin steps share the
			 * update_idle_hardmin_params union member
			 */
			dcn401_smu_set_active_uclk_fclk_hardmin(
					clk_mgr_internal,
					params->update_idle_hardmin_params.uclk_mhz,
					params->update_idle_hardmin_params.fclk_mhz);
			break;
		case CLK_MGR401_UPDATE_IDLE_HARDMINS:
			dcn401_smu_set_idle_uclk_fclk_hardmin(
					clk_mgr_internal,
					params->update_idle_hardmin_params.uclk_mhz,
					params->update_idle_hardmin_params.fclk_mhz);
			break;
		case CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK:
			dcn401_smu_set_min_deep_sleep_dcef_clk(
					clk_mgr_internal,
					params->update_deep_sleep_dcfclk_params.freq_mhz);
			break;
		case CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT:
			dcn401_smu_send_fclk_pstate_message(
					clk_mgr_internal,
					params->update_fclk_pstate_support_params.support);
			break;
		case CLK_MGR401_UPDATE_CAB_FOR_UCLK:
			dcn401_smu_send_cab_for_uclk_message(
					clk_mgr_internal,
					params->update_cab_for_uclk_params.num_ways);
			break;
		case CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK:
			dcn401_smu_wait_for_dmub_ack_mclk(
					clk_mgr_internal,
					params->update_wait_for_dmub_ack_params.enable);
			break;
		case CLK_MGR401_INDICATE_DRR_STATUS:
			dcn401_smu_indicate_drr_status(
					clk_mgr_internal,
					params->indicate_drr_status_params.mod_drr_for_pstate);
			break;
		case CLK_MGR401_UPDATE_DPPCLK_DTO:
			dcn401_update_clocks_update_dpp_dto(
					clk_mgr_internal,
					params->update_dppclk_dto_params.context,
					params->update_dppclk_dto_params.safe_to_lower,
					params->update_dppclk_dto_params.dppclk_khz);
			break;
		case CLK_MGR401_UPDATE_DTBCLK_DTO:
			dcn401_update_clocks_update_dtb_dto(
					clk_mgr_internal,
					params->update_dtbclk_dto_params.context,
					params->update_dtbclk_dto_params.ref_dtbclk_khz);
			break;
		case CLK_MGR401_UPDATE_DENTIST:
			dcn401_update_clocks_update_dentist(
					clk_mgr_internal,
					params->update_dentist_params.context);
			break;
		case CLK_MGR401_UPDATE_PSR_WAIT_LOOP:
			params->update_psr_wait_loop_params.dmcu->funcs->set_psr_wait_loop(
					params->update_psr_wait_loop_params.dmcu,
					params->update_psr_wait_loop_params.wait);
			break;
		default:
			/* this should never happen: the builder only emits funcs
			 * handled above
			 */
			BREAK_TO_DEBUGGER();
			break;
		}
	}
}
static uint32_t dcn401_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
struct fixed31_32 pll_req;
......
......@@ -5,8 +5,102 @@
#ifndef __DCN401_CLK_MGR_H_
#define __DCN401_CLK_MGR_H_
#define DCN401_CLK_MGR_MAX_SEQUENCE_SIZE 30
/*
 * Per-step argument storage for the DCN4.01 block sequence. Each step's
 * func enum determines which union member the executor reads; the builder
 * must populate that same member.
 */
union dcn401_clk_mgr_block_sequence_params {
	struct {
		/* inputs */
		uint32_t num_displays;
	} update_num_displays_params;
	struct {
		/* inputs */
		uint32_t ppclk;		/* PPCLK_e value */
		uint16_t freq_mhz;
		/* outputs: frequency granted by SMU, or NULL if unwanted */
		uint32_t *response;
	} update_hardmin_params;
	struct {
		/* inputs */
		uint32_t ppclk;		/* PPCLK_e value */
		int freq_khz;		/* KHz precision, unlike update_hardmin_params */
		/* outputs: frequency granted by SMU, or NULL if unwanted */
		uint32_t *response;
	} update_hardmin_optimized_params;
	struct {
		/* inputs — shared by both ACTIVE and IDLE hardmin steps */
		uint16_t uclk_mhz;
		uint16_t fclk_mhz;
	} update_idle_hardmin_params;
	struct {
		/* inputs */
		uint16_t freq_mhz;
	} update_deep_sleep_dcfclk_params;
	struct {
		/* inputs
		 * NOTE(review): assigned from FCLK_PSTATE_SUPPORTED /
		 * FCLK_PSTATE_NOTSUPPORTED enum values in the sequence builder;
		 * storing them in a bool assumes NOTSUPPORTED == 0 — confirm
		 * against the enum definition.
		 */
		bool support;
	} update_fclk_pstate_support_params;
	struct {
		/* inputs */
		unsigned int num_ways;
	} update_cab_for_uclk_params;
	struct {
		/* inputs */
		bool enable;
	} update_wait_for_dmub_ack_params;
	struct {
		/* inputs */
		bool mod_drr_for_pstate;
	} indicate_drr_status_params;
	struct {
		/* inputs */
		struct dc_state *context;
		int dppclk_khz;
		bool safe_to_lower;
	} update_dppclk_dto_params;
	struct {
		/* inputs */
		struct dc_state *context;
		int ref_dtbclk_khz;
	} update_dtbclk_dto_params;
	struct {
		/* inputs */
		struct dc_state *context;
		/* NOTE(review): ref_dtbclk_khz is not read by the DENTIST step
		 * in dcn401_update_clocks — possibly vestigial; verify before
		 * removing.
		 */
		int ref_dtbclk_khz;
	} update_dentist_params;
	struct {
		/* inputs */
		struct dmcu *dmcu;
		unsigned int wait;	/* PSR wait loop count */
	} update_psr_wait_loop_params;
};
/*
 * Step identifiers for the clock-update block sequence. Each value maps
 * to one case in the dcn401_update_clocks() dispatch switch and selects
 * which member of union dcn401_clk_mgr_block_sequence_params is valid.
 */
enum dcn401_clk_mgr_block_sequence_func {
	CLK_MGR401_READ_CLOCKS_FROM_DENTIST,
	CLK_MGR401_UPDATE_NUM_DISPLAYS,
	CLK_MGR401_UPDATE_HARDMIN_PPCLK,
	CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED,
	CLK_MGR401_UPDATE_ACTIVE_HARDMINS,
	CLK_MGR401_UPDATE_IDLE_HARDMINS,
	CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK,
	CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT,
	CLK_MGR401_UPDATE_CAB_FOR_UCLK,
	CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK,
	CLK_MGR401_INDICATE_DRR_STATUS,
	CLK_MGR401_UPDATE_DPPCLK_DTO,
	CLK_MGR401_UPDATE_DTBCLK_DTO,
	CLK_MGR401_UPDATE_DENTIST,
	CLK_MGR401_UPDATE_PSR_WAIT_LOOP,
};
/* One step of the clock-update sequence: a func id plus its arguments. */
struct dcn401_clk_mgr_block_sequence {
	union dcn401_clk_mgr_block_sequence_params params;
	enum dcn401_clk_mgr_block_sequence_func func;
};
/* DCN4.01 clock manager: common internal state plus scratch storage for
 * the update sequence built/executed on every dcn401_update_clocks() call.
 */
struct dcn401_clk_mgr {
	struct clk_mgr_internal base;

	struct dcn401_clk_mgr_block_sequence block_sequence[DCN401_CLK_MGR_MAX_SEQUENCE_SIZE];
};
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);
......
......@@ -105,6 +105,7 @@ void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
bool hard_min_done = false;
/* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
uint32_t param = (clk << 16) | freq_mhz;
......@@ -114,7 +115,84 @@ unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, u
dcn401_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetHardMinByFreq, param, &response);
smu_print("SMU Frequency set = %d KHz\n", response);
/* wait until hardmin acknowledged */
//hard_min_done = dcn401_smu_wait_get_hard_min_status(clk_mgr, clk);
smu_print("SMU Frequency set = %d KHz hard_min_done %d\n", response, hard_min_done);
return response;
}
/* Tell PMFW whether it must wait for a DMCUB ack before MCLK switches. */
void dcn401_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
	uint32_t param = enable ? 1 : 0;

	smu_print("SMU to wait for DMCUB ack for MCLK : %d\n", enable);

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetAlwaysWaitDmcubResp, param, NULL);
}
/* Inform PMFW whether DRR is being modulated for P-state switching. */
void dcn401_smu_indicate_drr_status(struct clk_mgr_internal *clk_mgr, bool mod_drr_for_pstate)
{
	uint32_t param = mod_drr_for_pstate ? 1 : 0;

	smu_print("SMU Set indicate drr status = %d\n", mod_drr_for_pstate);

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_IndicateDrrStatus, param, NULL);
}
/*
 * Send the combined idle UCLK/FCLK hardmin message to PMFW.
 *
 * Returns true when the SMU message was sent successfully.
 */
bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
		uint16_t uclk_freq_mhz,
		uint16_t fclk_freq_mhz)
{
	uint32_t response = 0;
	bool success;

	/* 15:0 for uclk, 31:16 for fclk. Cast before shifting: a plain
	 * uint16_t is promoted to signed int, and shifting a set bit 15
	 * into the sign bit is undefined behavior.
	 */
	uint32_t param = ((uint32_t)fclk_freq_mhz << 16) | uclk_freq_mhz;

	smu_print("SMU Set idle hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz);

	success = dcn401_smu_send_msg_with_param(clk_mgr,
			DALSMC_MSG_IdleUclkFclk, param, &response);

	/* wait until hardmin acknowledged */
	//success &= dcn401_smu_wait_get_hard_min_status(clk_mgr, PPCLK_UCLK);

	smu_print("SMU hard_min_done %d\n", success);

	return success;
}
/*
 * Send the combined active UCLK/FCLK hardmin message to PMFW.
 *
 * Returns true when the SMU message was sent successfully.
 */
bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
		uint16_t uclk_freq_mhz,
		uint16_t fclk_freq_mhz)
{
	uint32_t response = 0;
	bool success;

	/* 15:0 for uclk, 31:16 for fclk. Cast before shifting: a plain
	 * uint16_t is promoted to signed int, and shifting a set bit 15
	 * into the sign bit is undefined behavior.
	 */
	uint32_t param = ((uint32_t)fclk_freq_mhz << 16) | uclk_freq_mhz;

	smu_print("SMU Set active hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz);

	success = dcn401_smu_send_msg_with_param(clk_mgr,
			DALSMC_MSG_ActiveUclkFclk, param, &response);

	/* wait until hardmin acknowledged */
	//success &= dcn401_smu_wait_get_hard_min_status(clk_mgr, PPCLK_UCLK);

	smu_print("SMU hard_min_done %d\n", success);

	return success;
}
/* Program the minimum deep-sleep DCFCLK (MHz) in PMFW. */
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
{
	smu_print("SMU Set min deep sleep dcef clk: freq_mhz = %d MHz\n", freq_mhz);

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetMinDeepSleepDcfclk, freq_mhz, NULL);
}
/* Report the number of active displays to PMFW. */
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays)
{
	smu_print("SMU Set num of displays: num_displays = %d\n", num_displays);

	dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_NumOfDisplays, num_displays, NULL);
}
......@@ -17,5 +17,15 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
void dcn401_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn401_smu_indicate_drr_status(struct clk_mgr_internal *clk_mgr, bool mod_drr_for_pstate);
bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
uint16_t uclk_freq_mhz,
uint16_t fclk_freq_mhz);
bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
uint16_t uclk_freq_mhz,
uint16_t fclk_freq_mhz);
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
......@@ -204,7 +204,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_3_51;
break;
case AMDGPU_FAMILY_GC_12_0_0:
if (ASICREV_IS_DCN401(asic_id.hw_internal_rev))
if (ASICREV_IS_GC_12_0_1_A0(asic_id.hw_internal_rev) ||
ASICREV_IS_GC_12_0_0_A0(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_4_01;
break;
default:
......
......@@ -610,6 +610,8 @@ struct dc_clocks {
int max_supported_dispclk_khz;
int bw_dppclk_khz; /*a copy of dppclk_khz*/
int bw_dispclk_khz;
int idle_dramclk_khz;
int idle_fclk_khz;
};
struct dc_bw_validation_profile {
......@@ -1035,6 +1037,7 @@ struct dc_debug_options {
uint32_t dml21_force_pstate_method_value;
uint32_t dml21_disable_pstate_method_mask;
union dmub_fams2_global_feature_config fams2_config;
bool enable_legacy_clock_update;
unsigned int force_cositing;
};
......
......@@ -266,6 +266,9 @@ enum {
GC_12_UNKNOWN = 0xFF,
};
/* GC 12.0.x revision-range checks. The macro argument is parenthesized so
 * expressions like ASICREV_IS_DCN4(rev + 1) bind correctly (classic
 * function-like-macro precedence hazard in the original).
 */
#define ASICREV_IS_GC_12_0_1_A0(eChipRev) ((eChipRev) >= GC_12_0_1_A0 && (eChipRev) < GC_12_0_0_A0)
#define ASICREV_IS_GC_12_0_0_A0(eChipRev) ((eChipRev) >= GC_12_0_0_A0 && (eChipRev) < 0xFF)
#define ASICREV_IS_DCN4(eChipRev) ((eChipRev) >= GC_12_0_1_A0 && (eChipRev) < GC_12_0_0_A0)
#define ASICREV_IS_DCN401(eChipRev) ((eChipRev) >= GC_12_0_0_A0 && (eChipRev) < GC_12_UNKNOWN)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment