Commit b6e881c9 authored by Dmytro Laktyushkin, committed by Alex Deucher

drm/amd/display: update navi to use new surface programming behaviour

The new behaviour tracks global updates and updates any hardware that is not
related to the stream currently being updated.

This should fix any issues caused by pipe-split pipes being taken by
other streams.
Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 25409b37
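
The idea, in miniature: instead of returning UPDATE_TYPE_FULL early and losing track of what actually changed, stream-level changes now set bits in a per-stream flags union that the hardware-programming pass reads back later. Below is a standalone sketch of that model, not the driver code itself; the names mirror the union this patch adds to dc_stream.h, but the logic is simplified for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the union this patch adds to dc_stream.h;
 * the real one carries more bits (out_csc, gamut_remap, wb_update...). */
union stream_update_flags {
	struct {
		uint32_t scaling:1;
		uint32_t out_tf:1;
		uint32_t abm_level:1;
		uint32_t dpms_off:1;
	} bits;
	uint32_t raw;
};

enum surface_update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_FULL };

/* Accumulate one bit per changed field, then derive the overall update
 * type once, echoing check_update_surfaces_for_stream() after this patch. */
static enum surface_update_type check_stream_update(int scaling_changed,
		int abm_changed, union stream_update_flags *flags)
{
	flags->raw = 0;
	if (scaling_changed)
		flags->bits.scaling = 1;
	if (abm_changed)
		flags->bits.abm_level = 1;
	/* Any stream-level change still promotes to a full update... */
	return flags->raw ? UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
}

int main(void)
{
	union stream_update_flags flags;
	enum surface_update_type type = check_stream_update(1, 0, &flags);

	/* ...but the flags survive, so the hardware pass can later program
	 * only the blocks whose bits are set (the scaler here, not abm). */
	printf("type=%s scaling=%u abm=%u\n",
			type == UPDATE_TYPE_FULL ? "FULL" : "FAST",
			flags.bits.scaling, flags.bits.abm_level);
	return 0;
}

The payoff shows up in the hunks below: a full update can still be forced by saturating .raw, while the programming code keys individual register writes off individual bits.
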
@@ -5866,6 +5866,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -6287,9 +6288,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
stream_update.stream = dm_new_crtc_state->stream;
if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
dm_new_con_state, dm_new_crtc_state->stream);
stream_update.src = dm_new_crtc_state->stream->src;
stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -7158,7 +7160,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
......
@@ -761,8 +761,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
if (dc->hwss.apply_ctx_for_surface)
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, dangling_context);
#endif
}
current_ctx = dc->current_state;
@@ -1073,15 +1078,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
if (dc->hwss.apply_ctx_for_surface)
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
}
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
}
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
#endif
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1100,16 +1110,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context */
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
#endif
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
if (dc->hwss.apply_ctx_for_surface)
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
/*
* enable stereo
@@ -1492,20 +1507,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
update_flags->raw = 0; // Reset all flags
if (u->flip_addr)
update_flags->bits.addr_update = 1;
if (!is_surface_in_context(context, u->surface)) {
update_flags->bits.new_plane = 1;
if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
}
if (u->surface->force_full_update) {
update_flags->bits.full_update = 1;
return UPDATE_TYPE_FULL;
}
update_flags->raw = 0; // Reset all flags
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1563,40 +1573,43 @@ static enum surface_update_type check_update_surfaces_for_stream(
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
if (stream_status == NULL || stream_status->plane_count != surface_count)
return UPDATE_TYPE_FULL;
overall_type = UPDATE_TYPE_FULL;
/* some stream updates require passive update */
if (stream_update) {
if ((stream_update->src.height != 0) &&
(stream_update->src.width != 0))
return UPDATE_TYPE_FULL;
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
if ((stream_update->dst.height != 0) &&
(stream_update->dst.width != 0))
return UPDATE_TYPE_FULL;
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
(stream_update->dst.height != 0 && stream_update->dst.width != 0))
su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
return UPDATE_TYPE_FULL;
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
return UPDATE_TYPE_FULL;
su_flags->bits.abm_level = 1;
if (stream_update->dpms_off)
return UPDATE_TYPE_FULL;
su_flags->bits.dpms_off = 1;
if (stream_update->gamut_remap)
su_flags->bits.gamut_remap = 1;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
return UPDATE_TYPE_FULL;
su_flags->bits.wb_update = 1;
#endif
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
}
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
det_surface_update(dc, &updates[i]);
if (type == UPDATE_TYPE_FULL)
return type;
elevate_update_type(&overall_type, type);
}
@@ -1618,13 +1631,18 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
int i;
enum surface_update_type type;
if (stream_update)
stream_update->stream->update_flags.raw = 0;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL)
if (type == UPDATE_TYPE_FULL) {
if (stream_update)
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
dc->optimized_required = true;
@@ -2000,7 +2018,13 @@ static void commit_planes_for_stream(struct dc *dc,
* In the case of turning off the screen, there is no need to program the
* front end a second time; just return after programming blank.
*/
dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
if (dc->hwss.apply_ctx_for_surface)
dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
#endif
return;
}
@@ -2060,10 +2084,15 @@ static void commit_planes_for_stream(struct dc *dc,
stream_status =
stream_get_status(context, pipe_ctx->stream);
dc->hwss.apply_ctx_for_surface(
if (dc->hwss.apply_ctx_for_surface)
dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
dc->hwss.program_front_end_for_ctx(dc, context);
#endif
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
......
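
Worth noting for the dc.c hunks above: the core now treats apply_ctx_for_surface and program_front_end_for_ctx as optional hooks and calls whichever one the ASIC's hardware sequencer registered. A minimal sketch of that NULL-checked dispatch follows, with struct hw_sequencer_funcs reduced to the two hooks and the signatures simplified for illustration.

#include <stddef.h>
#include <stdio.h>

struct dc;
struct dc_state;

/* Reduced model of struct hw_sequencer_funcs: either hook may be NULL,
 * and dc core calls whichever one the ASIC registered. */
struct hwss {
	void (*apply_ctx_for_surface)(struct dc *dc, struct dc_state *ctx);
	void (*program_front_end_for_ctx)(struct dc *dc, struct dc_state *ctx);
};

static void dcn20_front_end(struct dc *dc, struct dc_state *ctx)
{
	(void)dc;
	(void)ctx;
	printf("programming all front ends for the context\n");
}

/* Echoes the guarded calls added throughout dc.c above. */
static void commit(const struct hwss *hwss, struct dc *dc, struct dc_state *ctx)
{
	if (hwss->apply_ctx_for_surface)
		hwss->apply_ctx_for_surface(dc, ctx);
	if (hwss->program_front_end_for_ctx)
		hwss->program_front_end_for_ctx(dc, ctx);
}

int main(void)
{
	/* Navi (DCN2) registers only the context-wide hook, as the
	 * dcn20_hw_sequencer_construct() hunk below shows. */
	struct hwss navi = { NULL, dcn20_front_end };

	commit(&navi, NULL, NULL); /* only the front-end hook fires */
	return 0;
}
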
@@ -113,6 +113,21 @@ struct periodic_interrupt_config {
int lines_offset;
};
union stream_update_flags {
struct {
uint32_t scaling:1;
uint32_t out_tf:1;
uint32_t out_csc:1;
uint32_t abm_level:1;
uint32_t dpms_off:1;
uint32_t gamut_remap:1;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t wb_update:1;
#endif
} bits;
uint32_t raw;
};
struct dc_stream_state {
// sink is deprecated, new code should not reference
@@ -214,9 +229,12 @@ struct dc_stream_state {
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
#endif
union stream_update_flags update_flags;
};
struct dc_stream_update {
struct dc_stream_state *stream;
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
......
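
The bits/raw aliasing in the union above is what keeps the dc.c logic cheap: setting any bits.* member makes .raw nonzero (the "did anything change?" test), and writing 0xFFFFFFFF to .raw forces every bit on for a full update. A tiny standalone check of that behaviour, relying on the same bitfield-layout assumption the kernel makes with gcc:

#include <assert.h>
#include <stdint.h>

/* Two-bit cut-down of the union added above. */
union flags {
	struct {
		uint32_t scaling:1;
		uint32_t out_tf:1;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union flags f = { .raw = 0 };

	f.bits.out_tf = 1;
	assert(f.raw != 0); /* the "did anything change?" test */

	f.raw = 0xFFFFFFFF; /* saturate: force a full update */
	assert(f.bits.scaling && f.bits.out_tf);
	return 0;
}
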
@@ -508,7 +508,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
}
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
static void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
DC_LOGGER_INIT(dc->ctx->logger);
@@ -923,7 +923,7 @@ static void dcn20_power_on_plane(
}
}
void dcn20_enable_plane(
static void dcn20_enable_plane(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -999,72 +999,6 @@ void dcn20_enable_plane(
}
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
pipe_ctx->plane_state->update_flags.bits.full_update =
context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
if (pipe_ctx->plane_state->update_flags.bits.full_update)
dcn20_enable_plane(dc, pipe_ctx, context);
update_dchubp_dpp(dc, pipe_ctx, context);
set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
* so only do gamma programming for a full update.
* TODO: This can be further optimized/cleaned up.
* Always call this for now since it does a memcmp inside before
* doing the heavy calculation and programming
*/
if (pipe_ctx->plane_state->update_flags.bits.full_update)
dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
}
static void dcn20_program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) {
bool blank = !is_pipe_tree_visible(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
pipe_ctx->pipe_dlg_param.vready_offset,
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
if (dc->hwss.update_odm)
dc->hwss.update_odm(dc, context, pipe_ctx);
}
if (pipe_ctx->plane_state != NULL)
dcn20_program_pipe(dc, pipe_ctx, context);
if (pipe_ctx->bottom_pipe != NULL) {
ASSERT(pipe_ctx->bottom_pipe != pipe_ctx);
dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
} else if (pipe_ctx->next_odm_pipe != NULL) {
ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx);
dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context);
}
}
void dcn20_pipe_control_lock_global(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1087,7 +1021,7 @@ void dcn20_pipe_control_lock_global(
}
}
void dcn20_pipe_control_lock(
static void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock)
@@ -1124,114 +1058,436 @@ void dcn20_pipe_control_lock(
}
}
static void dcn20_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context)
static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
struct timing_generator *tg;
bool removed_pipe[6] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
struct pipe_ctx *prev_top_pipe_to_program =
find_top_pipe_for_stream(dc, dc->current_state, stream);
DC_LOGGER_INIT(dc->ctx->logger);
new_pipe->update_flags.raw = 0;
if (!top_pipe_to_program)
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
/* Detect pipe enable/disable */
if (!old_pipe->plane_state && new_pipe->plane_state) {
new_pipe->update_flags.bits.enable = 1;
new_pipe->update_flags.bits.mpcc = 1;
new_pipe->update_flags.bits.dppclk = 1;
new_pipe->update_flags.bits.hubp_interdependent = 1;
new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
new_pipe->update_flags.bits.gamut_remap = 1;
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
new_pipe->update_flags.bits.odm = 1;
new_pipe->update_flags.bits.global_sync = 1;
}
return;
}
if (old_pipe->plane_state && !new_pipe->plane_state) {
new_pipe->update_flags.bits.disable = 1;
return;
}
/* Carry over GSL groups in case the context is changing. */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
/* Detect top pipe only changes */
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
/* Detect odm changes */
if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
&& old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
|| (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
|| (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.odm = 1;
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
|| old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
|| old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
new_pipe->update_flags.bits.global_sync = 1;
}
if (pipe_ctx->stream == stream &&
pipe_ctx->stream == old_pipe_ctx->stream)
pipe_ctx->stream_res.gsl_group =
old_pipe_ctx->stream_res.gsl_group;
/*
* Detect opp/tg change; only set on change, not on enable.
* Assume mpcc inst == pipe index; if not, this code needs updating,
* since mpcc is what these changes affect. In fact, the whole sequence
* currently makes this assumption, given how the hubp reset is matched
* to the same-index mpcc reset.
*/
if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.opp_changed = 1;
if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
new_pipe->update_flags.bits.tg_changed = 1;
/* Detect mpcc blending changes, only dpp inst and bot matter here */
if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
|| old_pipe->stream_res.opp != new_pipe->stream_res.opp
|| (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
|| (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
|| (old_pipe->bottom_pipe && new_pipe->bottom_pipe
&& old_pipe->bottom_pipe->plane_res.mpcc_inst
!= new_pipe->bottom_pipe->plane_res.mpcc_inst))
new_pipe->update_flags.bits.mpcc = 1;
/* Detect dppclk change */
if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
new_pipe->update_flags.bits.dppclk = 1;
/* Check for scl update */
if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
new_pipe->update_flags.bits.scaler = 1;
/* Check for vp update */
if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
new_pipe->update_flags.bits.viewport = 1;
/* Detect dlg/ttu/rq updates */
{
struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
/* Detect pipe interdependent updates */
if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
new_pipe->update_flags.bits.hubp_interdependent = 1;
}
/* Detect any other updates to ttu/rq/dlg */
if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
}
}
tg = top_pipe_to_program->stream_res.tg;
static void dcn20_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
interdependent_update = top_pipe_to_program->plane_state &&
top_pipe_to_program->plane_state->update_flags.bits.full_update;
if (pipe_ctx->update_flags.bits.dppclk) {
dpp->funcs->dpp_dppclk_control(dpp, false, true);
if (interdependent_update)
lock_all_pipes(dc, context, true);
else
dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
dc->res_pool->dccg->funcs->update_dpp_dto(
dc->res_pool->dccg,
dpp->inst,
pipe_ctx->plane_res.bw.dppclk_khz,
false);
}
if (num_planes == 0) {
/* OTG blank before remove all front end */
dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
* VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
* VTG has a 1:1 mapping with OTG; each pipe's HUBP selects which VTG to use.
*/
if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
hubp->funcs->hubp_setup(
hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
}
if (pipe_ctx->update_flags.bits.hubp_interdependent)
hubp->funcs->hubp_setup_interdependent(
hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
if (pipe_ctx->update_flags.bits.enable ||
plane_state->update_flags.bits.bpp_change ||
plane_state->update_flags.bits.input_csc_change ||
plane_state->update_flags.bits.color_space_change ||
plane_state->update_flags.bits.coeff_reduction_change) {
struct dc_bias_and_scale bns_params = {0};
// program the input csc
dpp->funcs->dpp_setup(dpp,
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
plane_state->color_space,
NULL);
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
dcn10_build_prescale_params(&bns_params, plane_state);
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
}
/* Disconnect unused mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
/*
* Powergate reused pipes that are not powergated.
* Fairly hacky right now, using opp_id as the indicator.
* TODO: After moving dc_post to dc_update, this will
* be removed.
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
dc->hwss.disable_plane(dc, old_pipe_ctx);
if (pipe_ctx->update_flags.bits.mpcc
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
/* Need mpcc to be idle if changing opp */
if (pipe_ctx->update_flags.bits.opp_changed) {
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
int mpcc_inst;
for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
continue;
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
}
}
dc->hwss.update_mpcc(dc, pipe_ctx);
}
if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
/* scaler configuration */
pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
&pipe_ctx->plane_res.scl_data.viewport_c);
/* Any updates are handled in the dc interface; just apply the existing state for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
}
/* Any updates are handled in the dc interface; just apply
* the existing state for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
/* call the dcn2 method which uses mpc csc */
dc->hwss.program_output_csc(dc,
pipe_ctx,
pipe_ctx->stream->output_color_space,
pipe_ctx->stream->csc_color_matrix.matrix,
hubp->opp_id);
}
if (num_planes > 0)
dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.opp_changed ||
plane_state->update_flags.bits.pixel_format_change ||
plane_state->update_flags.bits.horizontal_mirror_change ||
plane_state->update_flags.bits.rotation_change ||
plane_state->update_flags.bits.swizzle_change ||
plane_state->update_flags.bits.dcc_change ||
plane_state->update_flags.bits.bpp_change ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.plane_size_change) {
struct plane_size size = plane_state->plane_size;
size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
hubp->funcs->hubp_program_surface_config(
hubp,
plane_state->format,
&plane_state->tiling_info,
&size,
plane_state->rotation,
&plane_state->dcc,
plane_state->horizontal_mirror,
0);
hubp->power_gated = false;
}
if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
dc->hwss.update_plane_addr(dc, pipe_ctx);
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
}
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
/* Only need to unblank on top pipe */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
if (pipe_ctx->update_flags.bits.global_sync)
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
pipe_ctx->pipe_dlg_param.vready_offset,
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
if (pipe_ctx->update_flags.bits.odm)
dc->hwss.update_odm(dc, context, pipe_ctx);
if (pipe_ctx->update_flags.bits.enable)
dcn20_enable_plane(dc, pipe_ctx, context);
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->update_flags.bits.enable
|| pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
* so only do gamma programming when powering on; an internal memcmp
* avoids updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
}
static bool does_pipe_need_lock(struct pipe_ctx *pipe)
{
if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
|| pipe->update_flags.raw)
return true;
if (pipe->bottom_pipe)
return does_pipe_need_lock(pipe->bottom_pipe);
/* Program secondary blending tree and writeback pipes */
if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
return false;
}
if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
static void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
/* Carry over GSL groups in case the context is changing. */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (!context->res_ctx.pipe_ctx[i].top_pipe &&
does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Skip inactive pipes and ones already updated */
if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
!pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
continue;
if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
if (!pipe_ctx->update_flags.bits.enable)
dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
pipe_locked[i] = true;
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
&& !context->res_ctx.pipe_ctx[i].top_pipe
&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
&& context->res_ctx.pipe_ctx[i].stream)
dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
if (interdependent_update)
lock_all_pipes(dc, context, false);
else
dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
dcn20_program_pipe(dc, pipe, context);
pipe = pipe->bottom_pipe;
}
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
&& dc->hwss.program_all_writeback_pipes_in_tree)
dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
/* Unlock all locked pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i])
dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
if (pipe_locked[i]) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
if (!pipe_ctx->update_flags.bits.enable)
dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -1239,13 +1495,16 @@ static void dcn20_apply_ctx_for_surface(
* will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
* is unsupported on DCN.
*/
i = 0;
if (num_planes > 0 && top_pipe_to_program &&
(prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) {
while (i < TIMEOUT_FOR_PIPE_ENABLE_MS &&
top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) {
i += 1;
msleep(1);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
msleep(1);
}
}
}
@@ -2095,7 +2354,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc)
dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
dc->hwss.apply_ctx_for_surface = NULL;
dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
......
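
The heart of dcn20_program_front_end_for_ctx() above is dcn20_detect_pipe_changes(): diff the cached register state of the old and new pipe contexts and set an update bit only where they differ, so dcn20_program_pipe() can skip untouched blocks. Here is a hedged reduction of that pattern; the structs are illustrative stand-ins, not the driver's types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a pipe's cached scaler register state. */
struct scl_regs { uint32_t h_ratio, v_ratio; };

struct pipe {
	struct scl_regs scl;
	struct {
		uint32_t scaler:1;
		uint32_t dppclk:1;
	} update_flags;
	uint32_t dppclk_khz;
};

/* Echoes dcn20_detect_pipe_changes(): flag only what actually differs
 * between the current state and the new context. */
static void detect_pipe_changes(const struct pipe *old_pipe, struct pipe *new_pipe)
{
	if (memcmp(&old_pipe->scl, &new_pipe->scl, sizeof(struct scl_regs)))
		new_pipe->update_flags.scaler = 1;
	if (old_pipe->dppclk_khz != new_pipe->dppclk_khz)
		new_pipe->update_flags.dppclk = 1;
}

int main(void)
{
	struct pipe old_pipe = { .scl = { 2, 2 }, .dppclk_khz = 600000 };
	struct pipe new_pipe = { .scl = { 2, 2 }, .dppclk_khz = 800000 };

	detect_pipe_changes(&old_pipe, &new_pipe);
	/* Only dppclk changed, so only that block would be reprogrammed. */
	printf("scaler=%u dppclk=%u\n",
			new_pipe.update_flags.scaler, new_pipe.update_flags.dppclk);
	return 0;
}
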
@@ -114,6 +114,9 @@ struct hw_sequencer_funcs {
int opp_id);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*program_front_end_for_ctx)(
struct dc *dc,
struct dc_state *context);
void (*program_triplebuffer)(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
......