Commit e69d4335 authored by Alvin Lee, committed by Alex Deucher

drm/amd/display: Move fpo_in_use to stream_status

[Description]
Refactor the code to move fpo_in_use into stream_status, so that
building a new dc_state cannot unexpectedly modify the previous
dc_state (i.e., current_state).
Since stream pointers are shared between the current and new
dc_states, updating a parameter on a stream in one state also updates
it in the other, which causes unexpected behavior (e.g., a check that
fpo_in_use is not set in the previous state but is set in the new
state becomes invalid).
To avoid such incorrect updates to current_state, move the fpo_in_use
flag into dc_stream_status, since stream_status is owned by dc and is
not shared between different dc_states.
Reviewed-by: Samson Tam <samson.tam@amd.com>
Acked-by: Zaeem Mohamed <zaeem.mohamed@amd.com>
Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4621e10e
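Illustration (not part of the patch): the aliasing problem described in the
commit message can be reproduced with a small, self-contained C program.
The types fake_stream, fake_stream_status and fake_state below are
simplified stand-ins for dc_stream_state, dc_stream_status and dc_state,
not the real DC definitions.

/*
 * Minimal sketch of the aliasing issue: two states share a pointer to the
 * same stream object, so a flag stored on the stream is visible through
 * both states, while a flag stored in a per-state status record is not.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_stream {			/* stand-in for dc_stream_state */
	bool fpo_in_use;		/* old home of the flag (shared) */
};

struct fake_stream_status {		/* stand-in for dc_stream_status */
	bool fpo_in_use;		/* new home of the flag (per state) */
};

struct fake_state {			/* stand-in for dc_state */
	struct fake_stream *stream;		/* pointer shared across states */
	struct fake_stream_status status;	/* owned by each state */
};

int main(void)
{
	struct fake_stream stream = { .fpo_in_use = false };
	struct fake_state current = { .stream = &stream };
	struct fake_state next = { .stream = &stream };

	/* Old scheme: setting the flag through the new state also changes
	 * what the current state observes, because the pointer is shared.
	 */
	next.stream->fpo_in_use = true;
	printf("flag on shared stream, seen by current state: %d\n",
	       current.stream->fpo_in_use);	/* prints 1 */

	/* New scheme: each state owns its status record, so the current
	 * state is unaffected by the update made for the new state.
	 */
	next.status.fpo_in_use = true;
	printf("flag in per-state status of current state:   %d\n",
	       current.status.fpo_in_use);	/* prints 0 */

	return 0;
}

In the driver, the per-state status is looked up with
dc_state_get_stream_status(context, stream), which is why the hunks below
add a NULL check before reading or writing fpo_in_use.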
@@ -427,6 +427,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
 	uint8_t visual_confirm_enabled;
 	int pipe_idx = 0;
+	struct dc_stream_status *stream_status = NULL;
 
 	if (dc == NULL)
 		return false;
@@ -443,6 +444,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		stream_status = NULL;
 		if (!pipe->stream)
 			continue;
@@ -450,7 +452,8 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 		 * that does not use FAMS, we are in an FPO + VActive scenario.
 		 * Assign vactive stretch margin in this case.
 		 */
-		if (!pipe->stream->fpo_in_use) {
+		stream_status = dc_state_get_stream_status(context, pipe->stream);
+		if (stream_status && !stream_status->fpo_in_use) {
 			cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
 			break;
 		}
@@ -461,7 +464,12 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 		for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-			if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
+			stream_status = NULL;
+			if (!resource_is_pipe_type(pipe, OTG_MASTER))
+				continue;
+
+			stream_status = dc_state_get_stream_status(context, pipe->stream);
+			if (stream_status && stream_status->fpo_in_use) {
 				struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 				uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
...
@@ -59,6 +59,7 @@ struct dc_stream_status {
 	struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
 	bool is_abm_supported;
 	struct mall_stream_config mall_stream_config;
+	bool fpo_in_use;
 };
 
 enum hubp_dmdata_mode {
@@ -296,7 +297,6 @@ struct dc_stream_state {
 	bool has_non_synchronizable_pclk;
 	bool vblank_synchronized;
-	bool fpo_in_use;
 	bool is_phantom;
 	struct luminance_data lumin_data;
...
@@ -387,13 +387,17 @@ void dcn30_fpu_calculate_wm_and_dlg(
 	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
 	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
 	unsigned int dummy_latency_index = 0;
+	struct dc_stream_status *stream_status = NULL;
 
 	dc_assert_fp_enabled();
 
 	context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
 	for (i = 0; i < context->stream_count; i++) {
+		stream_status = NULL;
 		if (context->streams[i])
-			context->streams[i]->fpo_in_use = false;
+			stream_status = dc_state_get_stream_status(context, context->streams[i]);
+		if (stream_status)
+			stream_status->fpo_in_use = false;
 	}
 
 	if (!pstate_en) {
...
@@ -2309,6 +2309,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	bool need_fclk_lat_as_dummy = false;
 	bool is_subvp_p_drr = false;
 	struct dc_stream_state *fpo_candidate_stream = NULL;
+	struct dc_stream_status *stream_status = NULL;
 
 	dc_assert_fp_enabled();
@@ -2343,8 +2344,11 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
 	for (i = 0; i < context->stream_count; i++) {
+		stream_status = NULL;
 		if (context->streams[i])
-			context->streams[i]->fpo_in_use = false;
+			stream_status = dc_state_get_stream_status(context, context->streams[i]);
+		if (stream_status)
+			stream_status->fpo_in_use = false;
 	}
 
 	if (!pstate_en || (!dc->debug.disable_fpo_optimizations &&
@@ -2352,7 +2356,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 		/* only when the mclk switch can not be natural, is the fw based vblank stretch attempted */
 		fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
 		if (fpo_candidate_stream) {
-			fpo_candidate_stream->fpo_in_use = true;
+			stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
+			if (stream_status)
+				stream_status->fpo_in_use = true;
 			context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
 		}
@@ -2389,8 +2395,11 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 		 */
 		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
 		for (i = 0; i < context->stream_count; i++) {
+			stream_status = NULL;
 			if (context->streams[i])
-				context->streams[i]->fpo_in_use = false;
+				stream_status = dc_state_get_stream_status(context, context->streams[i]);
+			if (stream_status)
+				stream_status->fpo_in_use = false;
 		}
 		context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
 		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
...
@@ -601,9 +601,13 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 		struct hubp *hubp = pipe->plane_res.hubp;
+		struct dc_stream_status *stream_status = NULL;
+
+		if (pipe->stream)
+			stream_status = dc_state_get_stream_status(context, pipe->stream);
 
 		if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
-				pipe->stream->fpo_in_use)) {
+				(stream_status && stream_status->fpo_in_use))) {
 			if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
 				hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
 			if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
@@ -617,6 +621,8 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 		struct hubp *hubp = pipe->plane_res.hubp;
+		struct dc_stream_status *stream_status = NULL;
+		struct dc_stream_status *old_stream_status = NULL;
 
 		/* Today for MED update type we do not call update clocks. However, for FPO
 		 * the assumption is that update clocks should be called to disable P-State
@@ -630,11 +636,15 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
 		 * time SubVP / FPO was enabled, so there's no need to update / reset it if the
 		 * pipe config has never exited SubVP / FPO.
 		 */
+		if (pipe->stream)
+			stream_status = dc_state_get_stream_status(context, pipe->stream);
+		if (old_pipe->stream)
+			old_stream_status = dc_state_get_stream_status(dc->current_state, old_pipe->stream);
+
 		if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
-				pipe->stream->fpo_in_use) &&
-				(!old_pipe->stream ||
-				(dc_state_get_pipe_subvp_type(context, old_pipe) != SUBVP_MAIN &&
-				!old_pipe->stream->fpo_in_use))) {
+				(stream_status && stream_status->fpo_in_use)) &&
+				(!old_pipe->stream || (dc_state_get_pipe_subvp_type(context, old_pipe) != SUBVP_MAIN &&
+				(old_stream_status && !old_stream_status->fpo_in_use)))) {
 			if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
 				hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
 			if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
...
@@ -1966,6 +1966,7 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc,
 {
 	int refresh_rate = 0;
 	const int minimum_refreshrate_supported = 120;
+	struct dc_stream_status *stream_status = NULL;
 
 	if (context == NULL || context->streams[0] == NULL)
 		return false;
@@ -1999,7 +2000,12 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc,
 	if (context->streams[0]->vrr_active_variable && (dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE))
 		return false;
 
-	context->streams[0]->fpo_in_use = true;
+	stream_status = dc_state_get_stream_status(context, context->streams[0]);
+	if (!stream_status)
+		return false;
+
+	stream_status->fpo_in_use = true;
+
 	return true;
 }
...