Commit e6f29923 authored by Ville Syrjälä

drm/i915: Allow M/N change during fastset on bdw+

On BDW+ M/N are double buffered and so we can easily reprogram them
during a fastset. So for eDP panels that support seamless DRRS we
can just change these without a full modeset.
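As background, the arithmetic behind M/N is just a clock ratio: the
data M/N pair encodes (pixel clock * bpp) / (link clock * lanes * 8),
and on bdw+ the hardware latches the double buffered registers at
vblank, so rewriting the pair retimes the stream without retraining
the link. A minimal sketch of the derivation (illustrative only, not
the i915 code; the fixed N value is an assumption):

	struct link_m_n { unsigned int m, n; };

	/*
	 * data M/N = (pixel_clock * bpp) / (link_clock * lanes * 8).
	 * Keeping N fixed reduces M to a single scaled division.
	 */
	static void compute_data_m_n(unsigned long long pixel_clock_khz, int bpp,
				     unsigned long long link_clock_khz, int lanes,
				     struct link_m_n *m_n)
	{
		const unsigned long long n = 0x800000; /* assumed fixed N */

		m_n->n = n;
		m_n->m = pixel_clock_khz * bpp * n / (link_clock_khz * lanes * 8);
	}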

For earlier platforms we'd need to play tricks with M1/N1 vs.
M2/N2 during the fastset to make sure we do the switch atomically.
Not sure the added complexity is worth the hassle, so leave it
alone for now.

The slight downside is that we have to keep the link running at
a link rate capable of supporting the highest refresh rate we
want to use. For the moment we just pick the highest mode the
panel reports and calculate the link based on that. This might
need further refinement (e.g. if we run into bandwidth
restrictions)...
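
The check reduces to comparing the mode's required data rate with the
link's capacity. A sketch under the usual DP conventions (assumed
helper names; link rate in kHz, one byte per lane per link symbol
after 8b/10b encoding):

	/* required data rate in kB/s: pixel clock (kHz) * bpp / 8, rounded up */
	static int link_required(int pixel_clock_khz, int bpp)
	{
		return (pixel_clock_khz * bpp + 7) / 8;
	}

	static bool link_can_carry(int pixel_clock_khz, int bpp,
				   int link_rate_khz, int lanes)
	{
		/* each lane delivers one byte per link symbol */
		return link_required(pixel_clock_khz, bpp) <= link_rate_khz * lanes;
	}

With illustrative numbers: a highest mode needing a 600 MHz pixel
clock at 24 bpp requires 1.8 GB/s, which HBR2 x4 (540000 kHz * 4
lanes = 2.16 GB/s) covers; dropping to a 300 MHz downclock mode then
merely halves the M/N ratio while the link keeps running at the same
rate.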

v2: Only use the high link rate if the platform really supports
    the seamless M/N change during fastset (i.e. bdw+)
v3: Rebase due to HAS_DOUBLE_BUFFERED_M_N()
Reviewed-by: Mika Kahola <mika.kahola@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220907091057.11572-16-ville.syrjala@linux.intel.com
parent 74d6f31f
@@ -5730,6 +5730,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

 	if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
-		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+		if (!fastset || !pipe_config->seamless_m_n)
+			PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 	} else {
 		PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5862,8 +5863,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);

+	if (!fastset || !pipe_config->seamless_m_n) {
 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+	}

 	PIPE_CONF_CHECK_I(port_clock);
 	PIPE_CONF_CHECK_I(min_voltage_level);
@@ -7002,6 +7005,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 	if (DISPLAY_VER(dev_priv) >= 9 ||
 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
 		hsw_set_linetime_wm(new_crtc_state);
+
+	if (new_crtc_state->seamless_m_n)
+		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+					       &new_crtc_state->dp_m_n);
 }

 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
...
@@ -1130,6 +1130,7 @@ struct intel_crtc_state {
 	/* m2_n2 for eDP downclock */
 	struct intel_link_m_n dp_m2_n2;
 	bool has_drrs;
+	bool seamless_m_n;

 	/* PSR is supported but might not be enabled due the lack of enabled planes */
 	bool has_psr;
...
@@ -1297,21 +1297,45 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
 	}
 }

+static bool has_seamless_m_n(struct intel_connector *connector)
+{
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+	/*
+	 * Seamless M/N reprogramming only implemented
+	 * for BDW+ double buffered M/N registers so far.
+	 */
+	return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state)
+{
+	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+	/* FIXME a bit of a mess wrt clock vs. crtc_clock */
+	if (has_seamless_m_n(connector))
+		return intel_panel_highest_mode(connector, adjusted_mode)->clock;
+	else
+		return adjusted_mode->crtc_clock;
+}
+
 /* Optimize link config in order: max bpp, min clock, min lanes */
 static int
 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 				  struct intel_crtc_state *pipe_config,
+				  const struct drm_connector_state *conn_state,
 				  const struct link_config_limits *limits)
 {
-	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-	int bpp, i, lane_count;
+	int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
 	int mode_rate, link_rate, link_avail;

 	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
 		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

-		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-						   output_bpp);
+		mode_rate = intel_dp_link_required(clock, output_bpp);

 		for (i = 0; i < intel_dp->num_common_rates; i++) {
 			link_rate = intel_dp_common_rate(intel_dp, i);
@@ -1622,7 +1646,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	 * Optimize for slow and wide for everything, because there are some
 	 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
 	 */
-	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits);

 	if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
 		drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
@@ -1910,6 +1934,9 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
 		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
 	int pixel_clock;

+	if (has_seamless_m_n(connector))
+		pipe_config->seamless_m_n = true;
+
 	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
 		if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
 			intel_zero_m_n(&pipe_config->dp_m2_n2);
...