Commit f0ec3054 authored by Alvin Lee, committed by Alex Deucher

drm/amd/display: Ensure populate uclk in bb construction

[Description]
- For some SKUs, the optimal DCFCLK for each UCLK is less than the
  smallest DCFCLK STA target because memory bandwidth is low. The code
  assumes that the DCFCLK STA targets are always less than at least one
  of the optimal DCFCLK values, but this does not hold for SKUs with
  low memory bandwidth. In that case we need to populate the optimal
  UCLK for each DCFCLK STA target as the max UCLK frequency (see the
  illustrative sketch below).
- Also fix a bug in DML where start_state was not assigned and used
  correctly.
Reviewed-by: Samson Tam <samson.tam@amd.com>
Reviewed-by: Chaitanya Dhere <chaitanya.dhere@amd.com>
Acked-by: Alex Hung <alex.hung@amd.com>
Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 038c5323
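
For illustration only (not part of the commit): a minimal, self-contained sketch of the UCLK fallback referenced in the description. The helper name map_sta_targets_to_uclk and the uclk_mts array (standing in for bw_params->clk_table.entries[j].memclk_mhz * 16) are assumptions made for the sketch; the real change lives in the bounding-box construction code shown in the diff below.

/* Sketch only: map each DCFCLK STA target to a UCLK. */
static void map_sta_targets_to_uclk(const unsigned int *dcfclk_sta_targets,
		unsigned int num_dcfclk_sta_targets,
		const unsigned int *optimal_dcfclk_for_uclk,
		const unsigned int *uclk_mts,
		unsigned int num_uclk_states,
		unsigned int *optimal_uclk_for_dcfclk_sta_targets)
{
	unsigned int i, j;

	for (i = 0; i < num_dcfclk_sta_targets; i++) {
		for (j = 0; j < num_uclk_states; j++) {
			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
				/* Usual case: first UCLK whose optimal DCFCLK
				 * exceeds this STA target.
				 */
				optimal_uclk_for_dcfclk_sta_targets[i] = uclk_mts[j];
				break;
			} else if (j == num_uclk_states - 1) {
				/* Low-bandwidth SKU: every optimal DCFCLK is below
				 * the smallest STA target, so fall back to max UCLK.
				 */
				optimal_uclk_for_dcfclk_sta_targets[i] = uclk_mts[j];
			}
		}
	}
}

For example, with hypothetical values dcfclk_sta_targets = {694, 875} and optimal_dcfclk_for_uclk = {500, 600}, no optimal DCFCLK exceeds either STA target, so both entries of optimal_uclk_for_dcfclk_sta_targets end up at uclk_mts[1], the max UCLK.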
@@ -4273,7 +4273,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	//Calculate Swath, DET Configuration, DCFCLKDeepSleep
 	//
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			for (k = 0; k < v->NumberOfActivePlanes; ++k) {
 				v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
@@ -4576,7 +4576,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	//Calculate Return BW
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
 				if (v->BlendingAndTiming[k] == k) {
@@ -4635,7 +4635,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
 	v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
 		}
@@ -4646,7 +4646,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	if (v->ClampMinDCFCLK) {
 		/* Clamp calculated values to actual minimum */
-		for (i = 0; i < mode_lib->soc.num_states; ++i) {
+		for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 			for (j = 0; j <= 1; ++j) {
 				if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
 					v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
@@ -4656,7 +4656,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		}
 	}
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
 					v->ReturnBusWidth * v->DCFCLKState[i][j],
@@ -4674,7 +4674,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	//Re-ordering Buffer Support Check
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
 					> (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
@@ -4692,7 +4692,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
 	}
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
 					v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
@@ -4708,7 +4708,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	//Prefetch Check
-	for (i = 0; i < mode_lib->soc.num_states; ++i) {
+	for (i = start_state; i < mode_lib->soc.num_states; ++i) {
 		for (j = 0; j <= 1; ++j) {
 			int NextPrefetchModeState = MinPrefetchMode;
...
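
Side note, also for illustration (not from the commit): every start_state hunk above follows the same pattern, sketched below with assumed parameters; in DML the real loops run over mode_lib->soc.num_states voltage states.

/* Sketch only: per-state loops begin at start_state so that states
 * below the first valid one are skipped consistently.
 */
static void walk_states(int start_state, int num_states)
{
	int i, j;

	for (i = start_state; i < num_states; ++i) {
		for (j = 0; j <= 1; ++j) {
			/* evaluate voltage state i for both of the two
			 * per-state configurations considered here (j = 0, 1)
			 */
		}
	}
}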
@@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
 				optimal_uclk_for_dcfclk_sta_targets[i] =
 						bw_params->clk_table.entries[j].memclk_mhz * 16;
 				break;
+			} else {
+				/* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+				 * This is required for dcn303 because it just so happens that the memory
+				 * bandwidth is low enough such that all the optimal DCFCLK for each UCLK
+				 * is lower than the smallest DCFCLK STA target. In this case we need to
+				 * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK.
+				 */
+				if (j == num_uclk_states - 1) {
+					optimal_uclk_for_dcfclk_sta_targets[i] =
+							bw_params->clk_table.entries[j].memclk_mhz * 16;
+				}
 			}
 		}
 	}
...
@@ -2169,6 +2169,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
 				optimal_uclk_for_dcfclk_sta_targets[i] =
 						bw_params->clk_table.entries[j].memclk_mhz * 16;
 				break;
+			} else {
+				/* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+				 * If it just so happens that the memory bandwidth is low enough such that
+				 * all the optimal DCFCLK for each UCLK is lower than the smallest DCFCLK STA
+				 * target, we need to populate the optimal UCLK for each DCFCLK STA target to
+				 * be the max UCLK.
+				 */
+				if (j == num_uclk_states - 1) {
+					optimal_uclk_for_dcfclk_sta_targets[i] =
+							bw_params->clk_table.entries[j].memclk_mhz * 16;
+				}
 			}
 		}
 	}
...