Commit 9069b775 authored by Imre Deak

drm/i915/dp_mst: Fix PBN / MTP_TU size calculation for UHBR rates

Atm the allocated MST PBN value is calculated from the TU size (number
of allocated MTP slots) as

  PBN = TU * pbn_div

pbn_div being the link BW for each MTP slot. For DP 1.4 link rates this
worked, as pbn_div there is guaranteed to be an integer number; however,
on UHBR rates this isn't the case. To get a PBN, TU pair where TU is a
properly rounded-up value covering all the BW corresponding to PBN,
calculate PBN first and derive the TU value from it.
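
For instance, with made-up numbers chosen purely to illustrate the
rounding (not taken from any actual mode/link configuration):

  stream BW = 1 000 000 kB/s  ->  needs roundup(1 000 000 * 64 / 54 000) = 1186 PBN
  pbn_div   = 53.9 PBN per MTP slot (fractional, as on UHBR)

  old: TU  = 22 (rounded up from the data M/N values)
       PBN = TU * trunc(pbn_div) = 22 * 53 = 1166, less than the 1186 PBN needed
  new: PBN = 1186
       TU  = roundup(PBN / pbn_div) = roundup(1186 / 53.9) = 23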

Calculate PBN directly from the effective pixel data rate, instead of
calculating it indirectly from the corresponding TU and pbn_div values
(which are in turn derived from the pixel data rate and BW overhead).
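
As a self-contained illustration of this calculation chain, here is a
minimal user space sketch using made-up inputs (a 594000 kHz pixel clock
and a ~2.3% / 1023000 ppm overhead); it mirrors, but is not identical to,
the driver helpers added below:

  /* Illustration only, not driver code: made-up pixel clock and overhead. */
  #include <stdio.h>

  #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

  /* kHz * bpp (.4 fixed point) * overhead (ppm) -> kB/s */
  static long long effective_data_rate(long long pixel_clock, long long bpp_x16,
                                       long long bw_overhead)
  {
          return DIV_ROUND_UP(pixel_clock * bpp_x16 * bw_overhead,
                              1000000LL * 16 * 8);
  }

  int main(void)
  {
          long long rate = effective_data_rate(594000, 24 << 4, 1023000);
          long long pbn = DIV_ROUND_UP(rate * 64, 54 * 1000);

          /* prints: effective rate 1823002 kB/s -> 2161 PBN */
          printf("effective rate %lld kB/s -> %lld PBN\n", rate, pbn);
          return 0;
  }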

Add a helper function to calculate the effective data rate, and add a
note that callers of intel_dp_link_required() may also need to check
the effective data rate (vs. the data rate w/o the BW overhead).
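
In the units used by the code, the two rates relate as follows (both
rounded up), with pixel_clock in kHz, bpp_x16 = bpp * 16 and bw_overhead
in ppm:

  intel_dp_link_required():        rate     = pixel_clock * bpp / 8
  intel_dp_effective_data_rate():  eff_rate = pixel_clock * bpp_x16 * bw_overhead / (1000000 * 16 * 8)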

While at it add a note to check if WA#14013163432 is applicable.

v2:
- Fix PBN calculation, deriving it from the effective data rate directly
  instead of using the indirect TU and pbn_div values for this.
- Add a note about WA#14013163432. (Arun)
v3:
- Fix rounding up quotient while calculating remote_tu. (Ville)

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Arun R Murthy <arun.r.murthy@intel.com> (v1)
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231117150929.1767227-3-imre.deak@intel.com
parent e86fb4dc
@@ -355,6 +355,9 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
 /*
  * The required data bandwidth for a mode with given pixel clock and bpp. This
  * is the required net bandwidth independent of the data bandwidth efficiency.
+ *
+ * TODO: check if callers of this function should use
+ * intel_dp_effective_data_rate() instead.
  */
 int
 intel_dp_link_required(int pixel_clock, int bpp)
@@ -363,6 +366,22 @@ intel_dp_link_required(int pixel_clock, int bpp)
 	return DIV_ROUND_UP(pixel_clock * bpp, 8);
 }
 
+/**
+ * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
+ * @pixel_clock: pixel clock in kHz
+ * @bpp_x16: bits per pixel .4 fixed point format
+ * @bw_overhead: BW allocation overhead in 1ppm units
+ *
+ * Return the effective pixel data rate in kB/sec units taking into account
+ * the provided SSC, FEC, DSC BW allocation overhead.
+ */
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+				 int bw_overhead)
+{
+	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
+				1000000 * 16 * 8);
+}
+
 /*
  * Given a link rate and lanes, get the data bandwidth.
  *
......
@@ -103,6 +103,8 @@ bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+				 int bw_overhead);
 int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
 bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
......
@@ -115,6 +115,7 @@ static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 
+	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
 	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
 			       adjusted_mode->crtc_clock,
 			       crtc_state->port_clock,
@@ -124,6 +125,18 @@ static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
 	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
 }
 
+static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
+{
+	int effective_data_rate =
+		intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);
+
+	/*
+	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
+	 * to calculate PBN with the BW overhead passed to it.
+	 */
+	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
+}
+
 static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 						struct intel_crtc_state *crtc_state,
 						int max_bpp,
@@ -167,10 +180,10 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 		    min_bpp, max_bpp);
 
 	for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
-		struct intel_link_m_n remote_m_n;
 		int local_bw_overhead;
 		int remote_bw_overhead;
 		int link_bpp_x16;
+		int remote_tu;
 
 		drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
@@ -191,11 +204,6 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 				      link_bpp_x16,
 				      &crtc_state->dp_m_n);
 
-		intel_dp_mst_compute_m_n(crtc_state, connector,
-					 remote_bw_overhead,
-					 link_bpp_x16,
-					 &remote_m_n);
-
 		/*
 		 * The TU size programmed to the HW determines which slots in
 		 * an MTP frame are used for this stream, which needs to match
@@ -210,9 +218,14 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 		 * crtc_state->dp_m_n.tu), provided that the driver doesn't
 		 * enable SSC on the corresponding link.
 		 */
-		drm_WARN_ON(&i915->drm, remote_m_n.tu < crtc_state->dp_m_n.tu);
-		crtc_state->dp_m_n.tu = remote_m_n.tu;
-		crtc_state->pbn = remote_m_n.tu * dfixed_trunc(mst_state->pbn_div);
+		crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+							link_bpp_x16,
+							remote_bw_overhead);
+
+		remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full);
+
+		drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
+		crtc_state->dp_m_n.tu = remote_tu;
 
 		slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
 						      connector->port,
@@ -221,7 +234,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 			return slots;
 
 		if (slots >= 0) {
-			drm_WARN_ON(&i915->drm, slots != remote_m_n.tu);
+			drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
 			break;
 		}
......