Commit 5422b37c authored by Rodrigo Vivi

drm/i915/psr: Kill delays when activating psr back.

The immediate enabling was actually not an issue from the HW
perspective on core platforms that have HW tracking. The HW will
wait a few identical idle frames before transitioning to the actual
PSR active state anyway.

Now that we have removed VLV/CHV from the picture completely,
we can safely remove any delays.

Note that this patch also removes the delayed activation on HSW
and BDW introduced by commit d0ac896a ("drm/i915: Delay first PSR
activation."). That delay was introduced to fix a blank screen on
VLV/CHV and also masked some frozen screens on other core platforms,
probably the same ones that we are now properly hunting and fixing.

v2: (DK) Remove unnecessary WARN_ONs and make some other
         VLV || CHV checks more readable.
v3: Do it regardless of the timer rework.
v4: (DK/CI): Add VLV || CHV check on cancel work at psr_disable.
v5: Kill remaining items and fully rework activation functions.
v6: Rebase on top of VLV/CHV clean-up and keep the reactivation
    on a regular non-delayed work to avoid extra delays on exit
    calls and allow us to add few more safety checks before
    real activation.

Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180613192600.3955-1-rodrigo.vivi@intel.com
parent a2bbf714
@@ -2660,8 +2660,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
 		   dev_priv->psr.busy_frontbuffer_bits);
-	seq_printf(m, "Re-enable work scheduled: %s\n",
-		   yesno(work_busy(&dev_priv->psr.work.work)));
 	if (dev_priv->psr.psr2_enabled)
 		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
@@ -613,7 +613,7 @@ struct i915_psr {
 	bool sink_support;
 	struct intel_dp *enabled;
 	bool active;
-	struct delayed_work work;
+	struct work_struct work;
 	unsigned busy_frontbuffer_bits;
 	bool sink_psr2_support;
 	bool link_standby;
@@ -671,21 +671,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	dev_priv->psr.enable_source(intel_dp, crtc_state);
 	dev_priv->psr.enabled = intel_dp;
-	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_psr_activate(intel_dp);
-	} else {
-		/*
-		 * FIXME: Activation should happen immediately since this
-		 * function is just called after pipe is fully trained and
-		 * enabled.
-		 * However on some platforms we face issues when first
-		 * activation follows a modeset so quickly.
-		 * - On HSW/BDW we get a recoverable frozen screen until
-		 *   next exit-activate sequence.
-		 */
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-	}
+	intel_psr_activate(intel_dp);

 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -768,8 +754,6 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
-
-	cancel_delayed_work_sync(&dev_priv->psr.work);
 }

 static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
@@ -805,10 +789,13 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.work.work);
+		container_of(work, typeof(*dev_priv), psr.work);

 	mutex_lock(&dev_priv->psr.lock);

+	if (!dev_priv->psr.enabled)
+		goto unlock;
+
 	/*
 	 * We have to make sure PSR is ready for re-enable
 	 * otherwise it keeps disabled until next full enable/disable cycle.
@@ -949,9 +936,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	}

 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		if (!work_busy(&dev_priv->psr.work.work))
-			schedule_delayed_work(&dev_priv->psr.work,
-					      msecs_to_jiffies(100));
+		schedule_work(&dev_priv->psr.work);
 	mutex_unlock(&dev_priv->psr.lock);
 }
@@ -998,7 +983,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 		dev_priv->psr.link_standby = false;
 	}

-	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);

 	dev_priv->psr.enable_source = hsw_psr_enable_source;
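
As a side note for readers less familiar with the kernel workqueue API, the sketch below illustrates the delayed_work -> work_struct conversion pattern this patch applies. It is a minimal stand-alone example: demo_priv, demo_work_fn() and the module boilerplate are made-up placeholders, not i915 code.

/*
 * Illustrative sketch of converting a delayed_work to a plain
 * work_struct (placeholder names, not i915 code).
 */
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct work;	/* was: struct delayed_work work; */
};

static struct demo_priv demo;

static void demo_work_fn(struct work_struct *work)
{
	/*
	 * delayed_work embeds a work_struct member named "work", so the
	 * old code needed container_of(..., psr.work.work); with a plain
	 * work_struct the member itself is the work item.
	 */
	struct demo_priv *priv = container_of(work, struct demo_priv, work);

	(void)priv;	/* real code would re-activate PSR here */
}

static int __init demo_init(void)
{
	/* was: INIT_DELAYED_WORK(&demo.work, demo_work_fn); */
	INIT_WORK(&demo.work, demo_work_fn);

	/* was: schedule_delayed_work(&demo.work, msecs_to_jiffies(100)); */
	schedule_work(&demo.work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* was: cancel_delayed_work_sync(&demo.work); */
	cancel_work_sync(&demo.work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The only behavioural difference is that schedule_work() queues the item for the next worker run instead of arming a timer first, which is exactly the delay this commit removes.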