Commit 0701117e authored by Alvin Lee, committed by Alex Deucher

Revert "drm/amd/display: For FPO and SubVP/DRR configs program vmin/max sel"

This reverts commit 6b2b782a.

The reverted change was causing regressions in some DRR scenarios.
Reviewed-by: Aric Cyr <aric.cyr@amd.com>
Reviewed-by: Nevenko Stupar <nevenko.stupar@amd.com>
Acked-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 84d2ae7c
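
The hunks below revolve around dc->wm_optimized_required being tracked separately from dc->optimized_required, crtc_timing_adjust being carried as a stream update, and the dcn32_subvp_in_use()/resource_subvp_in_use() and update_drr_for_full_update() helpers. The stand-alone C sketch that follows only illustrates the flag handling: the |= accumulation called out in the dcn20_prepare_bandwidth comment and the rejection of DRR vmin/vmax adjustments while either flag is pending, as in dc_stream_adjust_vmin_vmax. The toy_* names are invented for illustration; this is not driver code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the two bandwidth-optimization flags. */
    struct toy_dc {
        bool optimized_required;
        bool wm_optimized_required;
    };

    /* Mirrors the |= pattern from dcn20_prepare_bandwidth: a pending
     * watermark update must not be cleared by a later call that sees
     * no change. */
    static void toy_prepare_bandwidth(struct toy_dc *dc, bool watermarks_changed)
    {
        dc->wm_optimized_required |= watermarks_changed;
    }

    /* Mirrors dc_post_update_surfaces_to_stream: both flags are cleared
     * only once the optimization has actually been applied. */
    static void toy_post_update_surfaces(struct toy_dc *dc)
    {
        dc->optimized_required = false;
        dc->wm_optimized_required = false;
    }

    int main(void)
    {
        struct toy_dc dc = { false, false };

        toy_prepare_bandwidth(&dc, true);   /* watermark programming requested */
        toy_prepare_bandwidth(&dc, false);  /* must stay pending */

        /* dc_check_update_surfaces_for_stream folds the flag in, and
         * dc_stream_adjust_vmin_vmax rejects DRR adjustments while pending. */
        dc.optimized_required |= dc.wm_optimized_required;
        printf("DRR adjust allowed: %d\n",
               !(dc.optimized_required || dc.wm_optimized_required)); /* prints 0 */

        toy_post_update_surfaces(&dc);
        printf("DRR adjust allowed: %d\n",
               !(dc.optimized_required || dc.wm_optimized_required)); /* prints 1 */
        return 0;
    }
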
@@ -411,12 +411,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
if (dc->optimized_required)
if (dc->optimized_required || dc->wm_optimized_required)
return false;
if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
return true;
dc_exit_ips_for_hw_access(dc);
stream->adjust.v_total_max = adjust->v_total_max;
@@ -2256,6 +2253,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2678,6 +2676,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -2885,6 +2885,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
if (update->crtc_timing_adjust)
stream->adjust = *update->crtc_timing_adjust;
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -3513,33 +3516,6 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
static void update_drr_for_full_update(struct dc *dc, struct dc_state *context)
{
uint32_t i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe->stream;
struct timing_generator *tg = pipe->stream_res.tg;
struct drr_params params = {0};
/* pipe not in use */
if (!resource_is_pipe_type(pipe, OTG_MASTER))
continue;
/* skip phantom pipes */
if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
params.vertical_total_mid = stream->adjust.v_total_mid;
params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
if (pipe->stream_res.tg->funcs->set_drr)
tg->funcs->set_drr(pipe->stream_res.tg, &params);
}
}
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3909,10 +3885,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
// Update DRR for all pipes
if (update_type != UPDATE_TYPE_FAST)
update_drr_for_full_update(dc, context);
current_stream_mask = get_stream_mask(dc, context);
if (current_stream_mask != context->stream_mask) {
context->stream_mask = current_stream_mask;
@@ -4353,7 +4325,8 @@ static bool full_update_required(struct dc *dc,
stream_update->mst_bw_update ||
stream_update->func_shaper ||
stream_update->lut3d_func ||
stream_update->pending_test_pattern))
stream_update->pending_test_pattern ||
stream_update->crtc_timing_adjust))
return true;
if (stream) {
@@ -5027,20 +5027,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
return DC_OK;
}
bool resource_subvp_in_use(struct dc *dc,
struct dc_state *context)
{
uint32_t i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
return true;
}
return false;
}
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
{
if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
@@ -1039,6 +1039,7 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
@@ -139,6 +139,7 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;
@@ -325,6 +326,7 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
@@ -183,6 +183,20 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
return true;
}
bool dcn32_subvp_in_use(struct dc *dc,
struct dc_state *context)
{
uint32_t i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
return true;
}
return false;
}
bool dcn32_mpo_in_use(struct dc_state *context)
{
uint32_t i;
@@ -33,7 +33,6 @@
#include "dcn30/dcn30_resource.h"
#include "link.h"
#include "dc_state_priv.h"
#include "resource.h"
#define DC_LOGGER_INIT(logger)
@@ -292,7 +291,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
resource_subvp_in_use(dc, context))
dcn32_subvp_in_use(dc, context))
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
@@ -2280,7 +2279,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
unsigned int dummy_latency_index = 0;
int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
bool subvp_active = resource_subvp_in_use(dc, context);
bool subvp_in_use = dcn32_subvp_in_use(dc, context);
unsigned int min_dram_speed_mts_margin;
bool need_fclk_lat_as_dummy = false;
bool is_subvp_p_drr = false;
@@ -2289,7 +2288,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc_assert_fp_enabled();
/* need to find dummy latency index for subvp */
if (subvp_active) {
if (subvp_in_use) {
/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */
if (!pstate_en) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2475,7 +2474,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
}
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_active) {
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
/* find largest table entry that is lower than dram speed,
* but lower than DPM0 still uses DPM0
*/
@@ -3535,7 +3534,7 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
{
// WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || resource_subvp_in_use(dc, context)) &&
if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
dc->dml.soc.num_chans <= 8) {
int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
@@ -3129,7 +3129,7 @@ void dcn10_prepare_bandwidth(
context,
false);
dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -2269,10 +2269,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
* For assigning optimized_required, use |= operator since we don't want
* For assigning wm_optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2285,10 +2285,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -609,9 +609,6 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
struct pipe_ctx *sec_pipe,
bool odm);
bool resource_subvp_in_use(struct dc *dc,
struct dc_state *context);
/* A test harness interface that modifies dp encoder resources in the given dc
* state and bypasses the need to revalidate. The interface assumes that the
* test harness interface is called with pre-validated link config stored in the
@@ -1913,7 +1913,7 @@ int dcn32_populate_dml_pipes_from_context(
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = resource_subvp_in_use,
.get_subvp_en = dcn32_subvp_in_use,
};
void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
@@ -131,6 +131,9 @@ void dcn32_merge_pipes_for_subvp(struct dc *dc,
bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct dc_state *context);
bool dcn32_subvp_in_use(struct dc *dc,
struct dc_state *context);
bool dcn32_mpo_in_use(struct dc_state *context);
bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
@@ -1574,7 +1574,7 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool)
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = resource_subvp_in_use,
.get_subvp_en = dcn32_subvp_in_use,
};
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)