Commit 85f4bc0c authored by Alvin Lee's avatar Alvin Lee Committed by Alex Deucher

drm/amd/display: Add SubVP required code

This commit enables the SubVP feature. To achieve that, we need to:

- Don't force p-state disallow on SubVP (can't block dummy p-state)
- Send calculated watermark to DMCUB for SubVP
- Adjust CAB mode message to PMFW
- Add a proper locking sequence for SubVP
- Various fixes to SubVP static analysis and determining SubVP config
- Currently SubVP is not supported with pipe split, so merge all pipes
  before setting up SubVP
Reviewed-by: default avatarJun Lei <Jun.Lei@amd.com>
Acked-by: default avatarRodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Acked-by: default avatarAlan Liu <HaoPing.Liu@amd.com>
Signed-off-by: default avatarAlvin Lee <Alvin.Lee2@amd.com>
Tested-by: default avatarDaniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent e72f03f4
...@@ -21,7 +21,31 @@ ...@@ -21,7 +21,31 @@
# #
# #
# Makefile for Display Core (dc) component. # Makefile for Display Core (dc) component.
#
ifdef CONFIG_X86
dmub_ccflags := -mhard-float -msse
endif
ifdef CONFIG_PPC64
dmub_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif
ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
dmub_ccflags += -mpreferred-stack-boundary=4
else
dmub_ccflags += -msse2
endif
endif
DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual
...@@ -75,6 +99,7 @@ AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) ...@@ -75,6 +99,7 @@ AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
DC_DMUB += dc_dmub_srv.o DC_DMUB += dc_dmub_srv.o
CFLAGS_$(AMDDALPATH)/dc/dc_dmub_srv.o := $(dmub_ccflags)
DC_EDID += dc_edid_parser.o DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID)) AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
......
...@@ -58,6 +58,12 @@ int clk_mgr_helper_get_active_display_cnt( ...@@ -58,6 +58,12 @@ int clk_mgr_helper_get_active_display_cnt(
for (i = 0; i < context->stream_count; i++) { for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i]; const struct dc_stream_state *stream = context->streams[i];
/* Don't count SubVP phantom pipes as part of active
* display count
*/
if (stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
/* /*
* Only notify active stream or virtual stream. * Only notify active stream or virtual stream.
* Need to notify virtual stream to work around * Need to notify virtual stream to work around
......
...@@ -100,9 +100,10 @@ void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool e ...@@ -100,9 +100,10 @@ void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool e
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways) void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways)
{ {
smu_print("Numways for SubVP : %d\n", num_ways); uint32_t param = (num_ways << 1) | (num_ways > 0);
dcn32_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, num_ways, NULL); dcn32_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, param, NULL);
smu_print("Numways for SubVP : %d\n", num_ways);
} }
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
......
...@@ -1905,7 +1905,8 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) ...@@ -1905,7 +1905,8 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
pipe = &context->res_ctx.pipe_ctx[i]; pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state) // Don't check flip pending on phantom pipes
if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
continue; continue;
/* Must set to false to start with, due to OR in update function */ /* Must set to false to start with, due to OR in update function */
...@@ -2917,6 +2918,13 @@ static void commit_planes_for_stream(struct dc *dc, ...@@ -2917,6 +2918,13 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j; int i, j;
struct pipe_ctx *top_pipe_to_program = NULL; struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
dc_z10_restore(dc); dc_z10_restore(dc);
...@@ -2955,6 +2963,15 @@ static void commit_planes_for_stream(struct dc *dc, ...@@ -2955,6 +2963,15 @@ static void commit_planes_for_stream(struct dc *dc,
} }
} }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
// Check old context for SubVP
subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe; struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe; struct pipe_ctx *odm_pipe;
...@@ -2984,8 +3001,13 @@ static void commit_planes_for_stream(struct dc *dc, ...@@ -2984,8 +3001,13 @@ static void commit_planes_for_stream(struct dc *dc,
} }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
dc->hwss.interdependent_update_lock(dc, context, true); dc->hwss.interdependent_update_lock(dc, context, true);
} else { } else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
/* Lock the top pipe while updating plane addrs, since freesync requires /* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized. * plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL * top_pipe_to_program is expected to never be NULL
...@@ -2993,8 +3015,40 @@ static void commit_planes_for_stream(struct dc *dc, ...@@ -2993,8 +3015,40 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
} }
if (update_type != UPDATE_TYPE_FAST) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
subvp_prev_use) {
// If old context or new context has phantom pipes, apply
// the phantom timings now. We can't change the phantom
// pipe configuration safely without driver acquiring
// the DMCUB lock first.
dc->hwss.apply_ctx_to_hw(dc, context);
break;
}
}
}
dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
if (update_type != UPDATE_TYPE_FAST) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
subvp_prev_use) {
// If old context or new context has phantom pipes, apply
// the phantom timings now. We can't change the phantom
// pipe configuration safely without driver acquiring
// the DMCUB lock first.
dc->hwss.apply_ctx_to_hw(dc, context);
break;
}
}
}
// Stream updates // Stream updates
if (stream_update) if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
...@@ -3009,11 +3063,20 @@ static void commit_planes_for_stream(struct dc *dc, ...@@ -3009,11 +3063,20 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx) if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context); dc->hwss.program_front_end_for_ctx(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false); dc->hwss.interdependent_update_lock(dc, context, false);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
} else { } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
} }
dc->hwss.post_unlock_program_front_end(dc, context); dc->hwss.post_unlock_program_front_end(dc, context);
return; return;
} }
......
...@@ -207,7 +207,6 @@ struct dc_caps { ...@@ -207,7 +207,6 @@ struct dc_caps {
bool vbios_lttpr_aware; bool vbios_lttpr_aware;
bool vbios_lttpr_enable; bool vbios_lttpr_enable;
uint32_t max_otg_num; uint32_t max_otg_num;
#ifdef CONFIG_DRM_AMD_DC_DCN
uint32_t max_cab_allocation_bytes; uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size; uint32_t cache_line_size;
uint32_t cache_num_ways; uint32_t cache_num_ways;
...@@ -215,7 +214,6 @@ struct dc_caps { ...@@ -215,7 +214,6 @@ struct dc_caps {
uint16_t subvp_prefetch_end_to_mall_start_us; uint16_t subvp_prefetch_end_to_mall_start_us;
uint16_t subvp_pstate_allow_width_us; uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us; uint16_t subvp_vertical_int_margin_us;
#endif
bool seamless_odm; bool seamless_odm;
}; };
......
...@@ -145,7 +145,6 @@ struct test_pattern { ...@@ -145,7 +145,6 @@ struct test_pattern {
unsigned int cust_pattern_size; unsigned int cust_pattern_size;
}; };
#ifdef CONFIG_DRM_AMD_DC_DCN
#define SUBVP_DRR_MARGIN_US 500 // 500us for DRR margin (SubVP + DRR) #define SUBVP_DRR_MARGIN_US 500 // 500us for DRR margin (SubVP + DRR)
enum mall_stream_type { enum mall_stream_type {
...@@ -161,7 +160,6 @@ struct mall_stream_config { ...@@ -161,7 +160,6 @@ struct mall_stream_config {
enum mall_stream_type type; enum mall_stream_type type;
struct dc_stream_state *paired_stream; // master / slave stream struct dc_stream_state *paired_stream; // master / slave stream
}; };
#endif
struct dc_stream_state { struct dc_stream_state {
// sink is deprecated, new code should not reference // sink is deprecated, new code should not reference
...@@ -277,9 +275,7 @@ struct dc_stream_state { ...@@ -277,9 +275,7 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk; bool has_non_synchronizable_pclk;
bool vblank_synchronized; bool vblank_synchronized;
#ifdef CONFIG_DRM_AMD_DC_DCN
struct mall_stream_config mall_stream_config; struct mall_stream_config mall_stream_config;
#endif
}; };
#define ABM_LEVEL_IMMEDIATE_DISABLE 255 #define ABM_LEVEL_IMMEDIATE_DISABLE 255
......
...@@ -1308,6 +1308,15 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx ...@@ -1308,6 +1308,15 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
} }
return; return;
} }
/* For SubVP we need to unconditionally enable because any phantom pipes are
* always removed then newly added for every full updates whenever SubVP is in use.
* The remove-add sequence of the phantom pipe always results in the pipe
* being blanked in enable_stream_timing (DPG).
*/
if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
new_pipe->update_flags.bits.enable = 1;
if (old_pipe->plane_state && !new_pipe->plane_state) { if (old_pipe->plane_state && !new_pipe->plane_state) {
new_pipe->update_flags.bits.disable = 1; new_pipe->update_flags.bits.disable = 1;
return; return;
...@@ -1810,7 +1819,9 @@ void dcn20_post_unlock_program_front_end( ...@@ -1810,7 +1819,9 @@ void dcn20_post_unlock_program_front_end(
*/ */
for (i = 0; i < dc->res_pool->pipe_count; i++) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { // Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp; struct hubp *hubp = pipe->plane_res.hubp;
int j = 0; int j = 0;
...@@ -1864,18 +1875,34 @@ void dcn20_prepare_bandwidth( ...@@ -1864,18 +1875,34 @@ void dcn20_prepare_bandwidth(
{ {
struct hubbub *hubbub = dc->res_pool->hubbub; struct hubbub *hubbub = dc->res_pool->hubbub;
unsigned int compbuf_size_kb = 0; unsigned int compbuf_size_kb = 0;
unsigned int cache_wm_a = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns;
unsigned int i;
dc->clk_mgr->funcs->update_clocks( dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr, dc->clk_mgr,
context, context,
false); false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't restore the original watermark value
if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
}
/* program dchubbub watermarks */ /* program dchubbub watermarks */
dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks, &context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false); false);
// Restore the real watermark so we can commit the value to DMCUB
// DMCUB uses the "original" watermark value in SubVP MCLK switch
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = cache_wm_a;
/* decrease compbuf size */ /* decrease compbuf size */
if (hubbub->funcs->program_compbuf_size) { if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes)
...@@ -1894,6 +1921,16 @@ void dcn20_optimize_bandwidth( ...@@ -1894,6 +1921,16 @@ void dcn20_optimize_bandwidth(
struct hubbub *hubbub = dc->res_pool->hubbub; struct hubbub *hubbub = dc->res_pool->hubbub;
int i; int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't need to restore the original watermark value
if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
}
/* program dchubbub watermarks */ /* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub, hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks, &context->bw_ctx.bw.dcn.watermarks,
......
...@@ -13,20 +13,21 @@ ...@@ -13,20 +13,21 @@
DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_hwseq.o dcn32_init.o \ DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_hwseq.o dcn32_init.o \
dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \ dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \
dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \ dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \
dcn32_mpc.o dcn32_resource_helpers.o dcn32_mpc.o
ifdef CONFIG_X86 ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn32/dcn32_resource.o := -mhard-float -msse dcn32_ccflags := -mhard-float -msse
endif endif
ifdef CONFIG_PPC64 ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn32/dcn32_resource.o := -mhard-float -maltivec dcn32_ccflags := -mhard-float -maltivec
endif endif
ifdef CONFIG_CC_IS_GCC ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y) ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1 IS_OLD_GCC = 1
endif endif
dcn32_ccflags += -mhard-float
endif endif
ifdef CONFIG_X86 ifdef CONFIG_X86
...@@ -34,12 +35,15 @@ ifdef IS_OLD_GCC ...@@ -34,12 +35,15 @@ ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution. # Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment). # (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn32/dcn32_resource.o += -mpreferred-stack-boundary=4 dcn32_ccflags += -mpreferred-stack-boundary=4
else else
CFLAGS_$(AMDDALPATH)/dc/dcn32/dcn32_resource.o += -msse2 dcn32_ccflags += -msse2
endif endif
endif endif
CFLAGS_$(AMDDALPATH)/dc/dcn32/dcn32_resource_helpers.o := $(dcn32_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dcn32/dcn32_resource.o := $(dcn32_ccflags)
AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32)) AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN32) AMD_DISPLAY_FILES += $(AMD_DAL_DCN32)
...@@ -47,7 +47,10 @@ ...@@ -47,7 +47,10 @@
#include "clk_mgr.h" #include "clk_mgr.h"
#include "dsc.h" #include "dsc.h"
#include "dcn20/dcn20_optc.h" #include "dcn20/dcn20_optc.h"
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_link_dp.h" #include "dc_link_dp.h"
#include "dmub/inc/dmub_subvp_state.h"
#define DC_LOGGER_INIT(logger) #define DC_LOGGER_INIT(logger)
...@@ -405,6 +408,65 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) ...@@ -405,6 +408,65 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
*/ */
} }
/* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:
 * 1. Any full update for any SubVP main pipe
 * 2. Any immediate flip for any SubVP pipe
 * 3. Any flip for DRR pipe
 * 4. If SubVP was previously in use (i.e. in old context)
 *
 * @dc: current dc state
 * @context: new dc state to take the lock against
 * @lock: true to acquire the DMUB HW lock, false to release it
 * @should_lock_all_pipes: full-update path (all pipes locked together)
 * @top_pipe_to_program: top pipe of the stream being updated (may be NULL)
 * @subvp_prev_use: SubVP phantom pipes existed in the previous (current) context
 */
void dcn32_subvp_pipe_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock,
		bool should_lock_all_pipes,
		struct pipe_ctx *top_pipe_to_program,
		bool subvp_prev_use)
{
	unsigned int i = 0;
	bool subvp_immediate_flip = false;
	bool subvp_in_use = false;
	bool drr_pipe = false;
	struct pipe_ctx *pipe;

	/* Scan the new context for an active SubVP main pipe. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
			subvp_in_use = true;
			break;
		}
	}

	if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) {
		if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN &&
				top_pipe_to_program->plane_state->flip_immediate)
			subvp_immediate_flip = true;
		else if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_NONE &&
				top_pipe_to_program->stream->ignore_msa_timing_param)
			drr_pipe = true;
	}

	if ((subvp_in_use && (should_lock_all_pipes || subvp_immediate_flip || drr_pipe)) || (!subvp_in_use && subvp_prev_use)) {
		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };

		if (!lock) {
			/* On release during a full update, wait for every SubVP main
			 * pipe to reach VBLANK before dropping the DMUB lock.
			 */
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				pipe = &context->res_ctx.pipe_ctx[i];
				if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
						should_lock_all_pipes)
					pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			}
		}

		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
		hw_lock_cmd.bits.lock = lock;
		hw_lock_cmd.bits.should_release = !lock;
		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
	}
}
static bool dcn32_set_mpc_shaper_3dlut( static bool dcn32_set_mpc_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
{ {
...@@ -500,7 +562,11 @@ void dcn32_subvp_update_force_pstate(struct dc *dc, struct dc_state *context) ...@@ -500,7 +562,11 @@ void dcn32_subvp_update_force_pstate(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { // For SubVP + DRR, also force disallow on the DRR pipe
// (We will force allow in the DMUB sequence -- some DRR timings by default won't allow P-State so we have
// to force once the vblank is stretched).
if (pipe->stream && pipe->plane_state && (pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
(pipe->stream->mall_stream_config.type == SUBVP_NONE && pipe->stream->ignore_msa_timing_param))) {
struct hubp *hubp = pipe->plane_res.hubp; struct hubp *hubp = pipe->plane_res.hubp;
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
...@@ -544,9 +610,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) ...@@ -544,9 +610,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
{ {
int i; int i;
struct dce_hwseq *hws = dc->hwseq; struct dce_hwseq *hws = dc->hwseq;
// Update force P-state for each pipe accordingly
if (hws && hws->funcs.subvp_update_force_pstate) // Don't force p-state disallow -- can't block dummy p-state
hws->funcs.subvp_update_force_pstate(dc, context);
// Update MALL_SEL register for each pipe // Update MALL_SEL register for each pipe
if (hws && hws->funcs.update_mall_sel) if (hws && hws->funcs.update_mall_sel)
......
...@@ -63,4 +63,11 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * ...@@ -63,4 +63,11 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
void dcn32_subvp_pipe_control_lock(struct dc *dc,
struct dc_state *context,
bool lock,
bool should_lock_all_pipes,
struct pipe_ctx *top_pipe_to_program,
bool subvp_prev_use);
#endif /* __DC_HWSS_DCN32_H__ */ #endif /* __DC_HWSS_DCN32_H__ */
...@@ -102,6 +102,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { ...@@ -102,6 +102,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits, .get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config, .commit_subvp_config = dcn32_commit_subvp_config,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color, .update_visual_confirm_color = dcn20_update_visual_confirm_color,
}; };
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
#include "core_types.h" #include "core_types.h"
#define DCN3_2_DET_SEG_SIZE 64
#define TO_DCN32_RES_POOL(pool)\ #define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base) container_of(pool, struct dcn32_resource_pool, base)
...@@ -61,7 +63,7 @@ bool dcn32_release_post_bldn_3dlut( ...@@ -61,7 +63,7 @@ bool dcn32_release_post_bldn_3dlut(
struct dc_3dlut **lut, struct dc_3dlut **lut,
struct dc_transfer_func **shaper); struct dc_transfer_func **shaper);
void dcn32_remove_phantom_pipes(struct dc *dc, bool dcn32_remove_phantom_pipes(struct dc *dc,
struct dc_state *context); struct dc_state *context);
void dcn32_add_phantom_pipes(struct dc *dc, void dcn32_add_phantom_pipes(struct dc *dc,
...@@ -85,4 +87,20 @@ void dcn32_calculate_wm_and_dlg( ...@@ -85,4 +87,20 @@ void dcn32_calculate_wm_and_dlg(
int pipe_cnt, int pipe_cnt,
int vlevel); int vlevel);
uint32_t dcn32_helper_calculate_num_ways_for_subvp
(struct dc *dc,
struct dc_state *context);
void dcn32_merge_pipes_for_subvp(struct dc *dc,
struct dc_state *context);
bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct dc_state *context);
bool dcn32_subvp_in_use(struct dc *dc,
struct dc_state *context);
void dcn32_update_det_override_for_mpo(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
#endif /* _DCN32_RESOURCE_H_ */ #endif /* _DCN32_RESOURCE_H_ */
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
// header file of functions being implemented
#include "dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
/**
* ********************************************************************************************
* dcn32_helper_populate_phantom_dlg_params: Get DLG params for phantom pipes and populate pipe_ctx
* with those params.
*
* This function must be called AFTER the phantom pipes are added to context and run through DML
* (so that the DLG params for the phantom pipes can be populated), and BEFORE we program the
* timing for the phantom pipes.
*
* @param [in] dc: current dc state
* @param [in] context: new dc state
* @param [in] pipes: DML pipe params array
* @param [in] pipe_cnt: DML pipe count
*
* @return: void
*
* ********************************************************************************************
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt)
{
	uint32_t dml_idx = 0;
	uint32_t hw_idx;

	/* Walk the hardware pipes; dml_idx tracks the matching DML pipe entry
	 * and only advances for pipes that actually carry a stream.
	 */
	for (hw_idx = 0; hw_idx < dc->res_pool->pipe_count; hw_idx++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[hw_idx];

		if (!pipe->stream)
			continue;

		/* Only phantom (SubVP) pipes with a plane get their DLG params
		 * pulled out of DML and cached on the pipe context.
		 */
		if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			pipes[dml_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
					dml_idx);
			pipes[dml_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt,
					dml_idx);
			pipes[dml_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
					dml_idx);
			pipes[dml_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
					dml_idx);
			pipe->pipe_dlg_param = pipes[dml_idx].pipe.dest;
		}
		dml_idx++;
	}
}
/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
*
* This function first checks the bytes required per pixel on the SubVP pipe, then calculates
* the total number of pixels required in the SubVP MALL region. These are used to calculate
* the number of cache lines used (then number of ways required) for SubVP MCLK switching.
*
* @param [in] dc: current dc state
* @param [in] context: new dc state
*
* @return: number of ways required for SubVP
*
* ********************************************************************************************
*/
uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
{
	uint32_t cache_lines_needed = 0;
	uint32_t total_cache_lines;
	uint32_t lines_per_way;
	uint32_t i;

	/* Accumulate the cache lines required by every phantom (SubVP) top pipe. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			uint32_t bytes_per_pixel =
					pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
			uint32_t mall_region_pixels =
					pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;

			/* cache lines used is total bytes / cache_line size. Add +2 for
			 * worst case alignment (MALL is 64-byte aligned).
			 */
			cache_lines_needed += (bytes_per_pixel * mall_region_pixels) / dc->caps.cache_line_size + 2;
		}
	}

	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
	lines_per_way = total_cache_lines / dc->caps.cache_num_ways;

	/* Round up to a whole number of ways (ceil division). */
	return (cache_lines_needed + lines_per_way - 1) / lines_per_way;
}
/* Merge all split pipes back into single pipes before SubVP is configured.
 * SubVP does not yet support pipe split, so both ODM-split and MPC-split
 * secondary pipes are detached and cleared here. The statement order below
 * is significant: linkage pointers are re-stitched before the pipe's own
 * fields are cleared.
 */
void dcn32_merge_pipes_for_subvp(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	/* merge pipes if necessary */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		// For now merge all pipes for SubVP since pipe split case isn't supported yet

		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
		if (pipe->prev_odm_pipe) {
			/*split off odm pipe*/
			// Unlink this pipe from the ODM chain: previous ODM pipe now
			// points past it, and any following ODM pipe points back.
			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
			if (pipe->next_odm_pipe)
				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;

			// Clear this pipe's own state now that it is detached.
			pipe->bottom_pipe = NULL;
			pipe->next_odm_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			pipe->top_pipe = NULL;
			pipe->prev_odm_pipe = NULL;
			// Release the DSC resource this pipe held, if any.
			if (pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
			// MPC-split secondary pipe (same plane as its top pipe):
			// splice it out of the top/bottom chain, then clear it.
			struct pipe_ctx *top_pipe = pipe->top_pipe;
			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;

			top_pipe->bottom_pipe = bottom_pipe;
			if (bottom_pipe)
				bottom_pipe->top_pipe = top_pipe;

			pipe->top_pipe = NULL;
			pipe->bottom_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		}
	}
}
bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct dc_state *context)
{
uint32_t i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
if (!pipe->plane_state)
return false;
}
return true;
}
bool dcn32_subvp_in_use(struct dc *dc,
struct dc_state *context)
{
uint32_t i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
return true;
}
return false;
}
/* For MPO we adjust the DET allocation to ensure we have enough DET buffer when an MPO pipe
 * is removed. For example for 1 MPO + 1 non-MPO normally we would allocate 6 DET segments
 * for each pipe [6, 6, 6]. But when transitioning out of MPO it would change from
 * [6, 6, 6] -> [9, 9]. However, if VUPDATE for the non-MPO pipe comes first we would be
 * trying to allocate more DET than what's currently available which would result in underflow.
 *
 * In this case we must ensure there is enough buffer when transitioning in and out of MPO:
 *
 * 1 MPO (2 plane) + 1 non-MPO case:
 * [4, 4, 9]<->[9, 9]: Allocate 4 each for MPO pipes, and maintain 9 for non-MPO pipe
 *
 * 1 MPO (2 plane) + 2 non-MPO case:
 * [3, 3, 5, 5]<->[6, 6, 6]
 *
 * 1 MPO (3 plane) + 1 non-MPO case:
 * [3, 3, 3, 9]<->[4, 4, 9] or [3, 3, 3, 6]<->[9, 9]
 *
 * For multi-display MPO case all pipes will have 4 segments:
 * Removing MPO on one of the displays will result in 3 pipes
 * (1 MPO and 1 non-MPO which is covered by single MPO stream case).
 */
void dcn32_update_det_override_for_mpo(struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes)
{
	uint8_t i, pipe_cnt;
	/* Initialized to 0 to avoid reading an indeterminate value; it is only
	 * meaningful when mpo_stream_count == 1 (set in the loop below).
	 */
	uint8_t mpo_stream_index = 0;
	uint8_t mpo_stream_count = 0;
	uint8_t mpo_planes = 0; // Only used in single display MPO case
	unsigned int j;
	struct resource_context *res_ctx = &context->res_ctx;

	/* Count MPO streams (more than one plane) and remember the last one. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->stream_status[i].plane_count > 1) {
			mpo_stream_index = i;
			mpo_stream_count++;
			mpo_planes = context->stream_status[i].plane_count;
		}
	}

	/* Only the single-MPO-stream configurations from the table above need
	 * a DET override; multi-display MPO already uses 4 segments per pipe.
	 */
	if (mpo_stream_count == 1) {
		for (j = 0, pipe_cnt = 0; j < dc->res_pool->pipe_count; j++) {
			if (!res_ctx->pipe_ctx[j].stream)
				continue;

			if (res_ctx->pipe_ctx[j].stream == context->streams[mpo_stream_index]) {
				// For 3 plane MPO + 1 non-MPO, do [3, 3, 3, 9]
				// For 2 plane MPO + 1 non-MPO, do [4, 4, 9]
				if (context->stream_count - mpo_stream_count == 1)
					pipes[pipe_cnt].pipe.src.det_size_override = DCN3_2_DET_SEG_SIZE * (mpo_planes == 2 ? 4 : 3);
				else if (context->stream_count - mpo_stream_count == 2)
					pipes[pipe_cnt].pipe.src.det_size_override = DCN3_2_DET_SEG_SIZE * 3;
			} else {
				/* Non-MPO pipes. The stream is known non-NULL here
				 * (checked above), so no extra NULL test is needed.
				 */
				if (context->stream_count - mpo_stream_count == 1)
					pipes[pipe_cnt].pipe.src.det_size_override = DCN3_2_DET_SEG_SIZE * 9;
				else if (context->stream_count - mpo_stream_count == 2)
					pipes[pipe_cnt].pipe.src.det_size_override = DCN3_2_DET_SEG_SIZE * 5;
			}
			pipe_cnt++;
		}
	}
}
...@@ -362,6 +362,7 @@ struct _vcs_dpi_display_pipe_source_params_st { ...@@ -362,6 +362,7 @@ struct _vcs_dpi_display_pipe_source_params_st {
unsigned int hostvm_levels_force; unsigned int hostvm_levels_force;
int source_scan; int source_scan;
int source_rotation; // new in dml32 int source_rotation; // new in dml32
unsigned int det_size_override; // use to populate DETSizeOverride in vba struct
int sw_mode; int sw_mode;
int macro_tile_size; int macro_tile_size;
unsigned int surface_width_y; unsigned int surface_width_y;
......
...@@ -202,9 +202,8 @@ struct resource_funcs { ...@@ -202,9 +202,8 @@ struct resource_funcs {
display_e2e_pipe_params_st *pipes, display_e2e_pipe_params_st *pipes,
unsigned int pipe_cnt, unsigned int pipe_cnt,
unsigned int index); unsigned int index);
void (*remove_phantom_pipes)(
struct dc *dc, bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
struct dc_state *context);
}; };
struct audio_support{ struct audio_support{
......
...@@ -246,6 +246,13 @@ struct hw_sequencer_funcs { ...@@ -246,6 +246,13 @@ struct hw_sequencer_funcs {
int mpcc_id); int mpcc_id);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
bool should_lock_all_pipes,
struct pipe_ctx *top_pipe_to_program,
bool subvp_prev_use);
}; };
void color_space_to_black_color( void color_space_to_black_color(
......
...@@ -92,6 +92,9 @@ ...@@ -92,6 +92,9 @@
*/ */
#define NUM_BL_CURVE_SEGS 16 #define NUM_BL_CURVE_SEGS 16
/* Maximum number of SubVP streams */
#define DMUB_MAX_SUBVP_STREAMS 2
/* Maximum number of streams on any ASIC. */ /* Maximum number of streams on any ASIC. */
#define DMUB_MAX_STREAMS 6 #define DMUB_MAX_STREAMS 6
...@@ -689,6 +692,9 @@ enum dmub_cmd_type { ...@@ -689,6 +692,9 @@ enum dmub_cmd_type {
* Command type used for <TODO:description> * Command type used for <TODO:description>
*/ */
DMUB_CMD__CAB_FOR_SS = 75, DMUB_CMD__CAB_FOR_SS = 75,
DMUB_CMD__FW_ASSISTED_MCLK_SWITCH = 76,
/** /**
* Command type used for interfacing with DPIA. * Command type used for interfacing with DPIA.
*/ */
...@@ -942,6 +948,80 @@ struct dmub_rb_cmd_cab_for_ss { ...@@ -942,6 +948,80 @@ struct dmub_rb_cmd_cab_for_ss {
uint8_t cab_alloc_ways; /* total number of ways */ uint8_t cab_alloc_ways; /* total number of ways */
uint8_t debug_bits; /* debug bits */ uint8_t debug_bits; /* debug bits */
}; };
/* Type of FW-assisted MCLK switch being configured for a pipe. */
enum mclk_switch_mode {
	NONE = 0,
	FPO = 1,
	SUBVP = 2,
	VBLANK = 3,
};
/* Per pipe struct which stores the MCLK switch mode
 * data to be sent to DMUB.
 * Named "v2" for now -- once FPO and SUBVP are fully merged
 * the type name can be updated
 */
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
	union {
		/* Payload used when mode == SUBVP. */
		struct {
			uint32_t pix_clk_100hz; // Pixel clock in units of 100 Hz
			uint16_t main_vblank_start;
			uint16_t main_vblank_end;
			uint16_t mall_region_lines;
			uint16_t prefetch_lines;
			uint16_t prefetch_to_mall_start_lines;
			uint16_t processing_delay_lines;
			uint16_t htotal; // required to calculate line time for multi-display cases
			uint16_t vtotal;
			uint8_t main_pipe_index;
			uint8_t phantom_pipe_index;
			uint8_t padding[2]; // Align to 4 byte boundary
		} subvp_data;

		/* Payload used when mode == VBLANK. */
		struct {
			uint32_t pix_clk_100hz; // Pixel clock in units of 100 Hz
			uint16_t vblank_start;
			uint16_t vblank_end;
			uint16_t vstartup_start;
			uint16_t vtotal;
			uint16_t htotal;
			uint8_t vblank_pipe_index;
			uint8_t padding[2];
			struct {
				uint8_t drr_in_use;
				uint8_t drr_window_size_ms; // Indicates largest VMIN/VMAX adjustment per frame
				uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK
				uint16_t max_vtotal_supported; // Max VTOTAL that can support SubVP static scheduling
				uint8_t use_ramping; // Use ramping or not
			} drr_info; // DRR considered as part of SubVP + VBLANK case
		} vblank_data;
	} pipe_config;

	/* Selects which union member above is valid. */
	enum mclk_switch_mode mode;
};
/**
 * Config data for Sub-VP and FPO
 * Named "v2" for now -- once FPO and SUBVP are fully merged
 * the type name can be updated
 */
struct dmub_cmd_fw_assisted_mclk_switch_config_v2 {
	uint16_t watermark_a_cache; // Calculated watermark A value sent to DMCUB -- TODO confirm units
	uint8_t vertical_int_margin_us; // Vertical interrupt margin, in microseconds
	uint8_t pstate_allow_width_us; // P-state allow width, in microseconds
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 pipe_data[DMUB_MAX_SUBVP_STREAMS];
};
/**
 * DMUB rb command definition for Sub-VP and FPO
 * Named "v2" for now -- once FPO and SUBVP are fully merged
 * the type name can be updated
 */
struct dmub_rb_cmd_fw_assisted_mclk_switch_v2 {
	struct dmub_cmd_header header; // Common DMUB command header
	struct dmub_cmd_fw_assisted_mclk_switch_config_v2 config_data; // Command payload
};
/** /**
* enum dmub_cmd_idle_opt_type - Idle optimization command type. * enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/ */
...@@ -1494,6 +1574,12 @@ enum dmub_cmd_psr_type { ...@@ -1494,6 +1574,12 @@ enum dmub_cmd_psr_type {
DMUB_CMD__SET_PSR_POWER_OPT = 7, DMUB_CMD__SET_PSR_POWER_OPT = 7,
}; };
/* Sub-commands for the FW assisted MCLK switch (FAMS) command. */
enum dmub_cmd_fams_type {
	DMUB_CMD__FAMS_SETUP_FW_CTRL = 0,
	DMUB_CMD__FAMS_DRR_UPDATE = 1,
	DMUB_CMD__HANDLE_SUBVP_CMD = 2, // specifically for SubVP cmd
};
/** /**
* PSR versions. * PSR versions.
*/ */
...@@ -2958,6 +3044,9 @@ union dmub_rb_cmd { ...@@ -2958,6 +3044,9 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__CAB command. * Definition of a DMUB_CMD__CAB command.
*/ */
struct dmub_rb_cmd_cab_for_ss cab; struct dmub_rb_cmd_cab_for_ss cab;
struct dmub_rb_cmd_fw_assisted_mclk_switch_v2 fw_assisted_mclk_switch_v2;
/** /**
* Definition of a DMUB_CMD__IDLE_OPT_DCN_RESTORE command. * Definition of a DMUB_CMD__IDLE_OPT_DCN_RESTORE command.
*/ */
......
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DMUB_SUBVP_STATE_H
#define DMUB_SUBVP_STATE_H
#include "dmub_cmd.h"
#define DMUB_SUBVP_INST0 0
#define DMUB_SUBVP_INST1 1
#define SUBVP_MAX_WATERMARK 0xFFFF
/* Cached HUBP cursor and surface-address state. Field names mirror the
 * corresponding DCN register names (CURSOR0_0_* and HUBPREQ0_*).
 */
struct dmub_subvp_hubp_state {
	uint32_t CURSOR0_0_CURSOR_POSITION;
	uint32_t CURSOR0_0_CURSOR_HOT_SPOT;
	uint32_t CURSOR0_0_CURSOR_DST_OFFSET;
	uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
	uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
	uint32_t CURSOR0_0_CURSOR_SIZE;
	uint32_t CURSOR0_0_CURSOR_CONTROL;
	uint32_t HUBPREQ0_CURSOR_SETTINGS;
	uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
	uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
	uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
};
/* Error codes for the SubVP state machine. */
enum subvp_error_code {
	DMUB_SUBVP_INVALID_STATE,
	DMUB_SUBVP_INVALID_TRANSITION,
};
/* States of the SubVP state machine. */
enum subvp_state {
	DMUB_SUBVP_DISABLED,
	DMUB_SUBVP_IDLE,
	DMUB_SUBVP_TRY_ACQUIRE_LOCKS,
	DMUB_SUBVP_WAIT_FOR_LOCKS,
	DMUB_SUBVP_PRECONFIGURE,
	DMUB_SUBVP_PREPARE,
	DMUB_SUBVP_ENABLE,
	DMUB_SUBVP_SWITCHING,
	DMUB_SUBVP_END,
	DMUB_SUBVP_RESTORE,
};
/* Defines information for SUBVP to handle vertical interrupts. */
struct dmub_subvp_vertical_interrupt_event {
	/**
	 * @otg_inst: Hardware instance of vertical interrupt.
	 */
	uint8_t otg_inst;

	/**
	 * @pad: Align structure to 4 byte boundary.
	 */
	uint8_t pad[3];

	/**
	 * @curr_state: Current SubVP state.
	 */
	enum subvp_state curr_state;
};
/* Vertical interrupt tracking for all streams. */
struct dmub_subvp_vertical_interrupt_state {
	/**
	 * @events: Event list, one entry per stream.
	 */
	struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS];
};
/* Defines information for SubVP to handle vline interrupts. */
struct dmub_subvp_vline_interrupt_event {
	/**
	 * @hubp_inst: Hardware instance of HUBP.
	 */
	uint8_t hubp_inst;
	/**
	 * @pad: Align structure to 4 byte boundary.
	 */
	uint8_t pad[3];
};
/* Vline interrupt tracking for all planes. */
struct dmub_subvp_vline_interrupt_state {
	/**
	 * @events: Event list, one entry per plane.
	 */
	struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES];
};
/* Aggregate interrupt-tracking context for SubVP. */
struct dmub_subvp_interrupt_ctx {
	struct dmub_subvp_vertical_interrupt_state vertical_int;
	struct dmub_subvp_vline_interrupt_state vline_int;
};
/* Per-pipe timing and bookkeeping state for a SubVP main/phantom pipe pair. */
struct dmub_subvp_pipe_state {
	uint32_t pix_clk_100hz; // Pixel clock in units of 100 Hz
	uint16_t main_vblank_start;
	uint16_t main_vblank_end;
	uint16_t mall_region_lines;
	uint16_t prefetch_lines;
	uint16_t prefetch_to_mall_start_lines;
	uint16_t processing_delay_lines;
	uint8_t main_pipe_index;
	uint8_t phantom_pipe_index;
	uint16_t htotal; // htotal for main / phantom pipe
	uint16_t vtotal;
	uint16_t optc_underflow_count; // NOTE(review): presumably debug/telemetry counters -- confirm
	uint16_t hubp_underflow_count;
	uint8_t pad[2]; // Align structure to 4 byte boundary
};
/**
 * struct dmub_subvp_vblank_drr_info - Store DRR state when handling
 * SubVP + VBLANK with DRR multi-display case.
 *
 * The info stored in this struct is only valid if drr_in_use = 1.
 */
struct dmub_subvp_vblank_drr_info {
	uint8_t drr_in_use;
	uint8_t drr_window_size_ms; // DRR window size -- indicates largest VMIN/VMAX adjustment per frame
	uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK
	uint16_t max_vtotal_supported; // Max VTOTAL that can still support SubVP static scheduling requirements
	uint16_t prev_vmin; // Store VMIN value before MCLK switch (used to restore after MCLK end)
	uint16_t prev_vmax; // Store VMAX value before MCLK switch (used to restore after MCLK end)
	uint8_t use_ramping; // Use ramping or not
	uint8_t pad[1]; // Align structure to 2 byte boundary
};
/* Timing info for the pipe handled in the SubVP + VBLANK case. */
struct dmub_subvp_vblank_pipe_info {
	uint32_t pix_clk_100hz; // Pixel clock in units of 100 Hz
	uint16_t vblank_start;
	uint16_t vblank_end;
	uint16_t vstartup_start;
	uint16_t vtotal;
	uint16_t htotal;
	uint8_t pipe_index;
	uint8_t pad[1]; // Align to 2 byte boundary
	struct dmub_subvp_vblank_drr_info drr_info; // DRR considered as part of SubVP + VBLANK case
};
/* Which display combination the MCLK switch is performed for. */
enum subvp_switch_type {
	DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE
	DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays
	DMUB_SUBVP_AND_VBLANK, // SubVP display + display switching in VBLANK
	DMUB_SUBVP_AND_FPO, // SubVP display + FPO display -- TODO confirm
};
/* SubVP state. */
struct dmub_subvp_state {
	struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS];
	struct dmub_subvp_interrupt_ctx int_ctx;
	struct dmub_subvp_vblank_pipe_info vblank_info;
	enum subvp_state state; // current state
	enum subvp_switch_type switch_type; // NOTE(review): assumes enums occupy 4 bytes -- confirm for the FW-shared layout
	uint8_t mclk_pending;
	uint8_t num_subvp_streams;
	uint8_t vertical_int_margin_us; // Vertical interrupt margin, in microseconds
	uint8_t pstate_allow_width_us; // P-state allow width, in microseconds
	uint32_t subvp_mclk_switch_count; // NOTE(review): counters below look like debug/telemetry stats -- confirm
	uint32_t subvp_wait_lock_count;
	uint32_t driver_wait_lock_count;
	uint32_t subvp_vblank_frame_count;
	uint16_t watermark_a_cache;
	uint8_t pad[2]; // Align structure to 4 byte boundary
};
#endif /* DMUB_SUBVP_STATE_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment