Commit ebb5eb7d authored by Chris Wilson

drm/i915: Replace pcu_lock with sb_lock

We now have two locks for sideband access: the general sb_lock,
covering sideband access across all generations, and a specific one
covering sideband access via the punit on vlv/chv. After lifting the
sb_lock around the punit into the callers, the pcu_lock is now
redundant and can be separated from its other use of regulating RPS
(essentially giving RPS a lock all of its own).
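
For illustration only (not part of the patch): under the new scheme a
punit read on vlv/chv is simply bracketed by vlv_punit_get()/
vlv_punit_put(), which serialize the sideband access internally, while
RPS state is guarded by the new rps->lock. The helper below is a
hypothetical sketch of that pattern, not code from this series.

/* Hypothetical example of the post-patch punit access pattern. */
static u32 example_read_gpu_freq_sts(struct drm_i915_private *i915)
{
	u32 val;

	/* vlv_punit_get() serializes punit sideband access; no pcu_lock. */
	vlv_punit_get(i915);
	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_put(i915);

	return val;
}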

v2: Extract a couple of minor bug fixes.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426081725.31217-4-chris@chris-wilson.co.uk
parent 337fa6e0
@@ -1046,8 +1046,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 rpmodectl, freq_sts;
-		mutex_lock(&dev_priv->pcu_lock);
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 		seq_printf(m, "Video Turbo Mode: %s\n",
 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -1082,7 +1080,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
-		mutex_unlock(&dev_priv->pcu_lock);
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
@@ -1487,12 +1484,9 @@ static int gen6_drpc_info(struct seq_file *m)
 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
 	}
-	if (INTEL_GEN(dev_priv) <= 7) {
-		mutex_lock(&dev_priv->pcu_lock);
+	if (INTEL_GEN(dev_priv) <= 7)
 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
 				       &rc6vids);
-		mutex_unlock(&dev_priv->pcu_lock);
-	}
 	seq_printf(m, "RC1e Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1756,17 +1750,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	unsigned int max_gpu_freq, min_gpu_freq;
 	intel_wakeref_t wakeref;
 	int gpu_freq, ia_freq;
-	int ret;
 	if (!HAS_LLC(dev_priv))
 		return -ENODEV;
-	wakeref = intel_runtime_pm_get(dev_priv);
-	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
-	if (ret)
-		goto out;
 	min_gpu_freq = rps->min_freq;
 	max_gpu_freq = rps->max_freq;
 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
@@ -1777,6 +1764,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+	wakeref = intel_runtime_pm_get(dev_priv);
 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
@@ -1790,12 +1778,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
-	mutex_unlock(&dev_priv->pcu_lock);
-out:
 	intel_runtime_pm_put(dev_priv, wakeref);
-	return ret;
+	return 0;
 }
 static int i915_opregion(struct seq_file *m, void *unused)
@@ -2032,13 +2017,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-			mutex_lock(&dev_priv->pcu_lock);
 			vlv_punit_get(dev_priv);
 			act_freq = vlv_punit_read(dev_priv,
 						  PUNIT_REG_GPU_FREQ_STS);
 			vlv_punit_put(dev_priv);
 			act_freq = (act_freq >> 8) & 0xff;
-			mutex_unlock(&dev_priv->pcu_lock);
 		} else {
 			act_freq = intel_get_cagf(dev_priv,
 						  I915_READ(GEN6_RPSTAT1));
...
@@ -648,6 +648,8 @@ struct intel_rps_ei {
 };
 struct intel_rps {
+	struct mutex lock; /* protects enabling and the worker */
 	/*
 	 * work, interrupts_enabled and pm_iir are protected by
 	 * dev_priv->irq_lock
@@ -1710,14 +1712,6 @@ struct drm_i915_private {
 	 */
 	u32 edram_size_mb;
-	/*
-	 * Protects RPS/RC6 register access and PCU communication.
-	 * Must be taken after struct_mutex if nested. Note that
-	 * this lock may be held for long periods of time when
-	 * talking to hw - so only take it when talking to hw!
-	 */
-	struct mutex pcu_lock;
 	/* gen6+ GT PM state */
 	struct intel_gen6_power_mgmt gt_pm;
...
@@ -1301,7 +1301,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
 		goto out;
-	mutex_lock(&dev_priv->pcu_lock);
+	mutex_lock(&rps->lock);
 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
@@ -1367,7 +1367,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		rps->last_adj = 0;
 	}
-	mutex_unlock(&dev_priv->pcu_lock);
+	mutex_unlock(&rps->lock);
 out:
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
...
@@ -263,7 +263,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 	wakeref = intel_runtime_pm_get(dev_priv);
-	mutex_lock(&dev_priv->pcu_lock);
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		vlv_punit_get(dev_priv);
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -273,7 +272,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 	} else {
 		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
 	}
-	mutex_unlock(&dev_priv->pcu_lock);
 	intel_runtime_pm_put(dev_priv, wakeref);
@@ -318,12 +316,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 	if (val < rps->min_freq || val > rps->max_freq)
 		return -EINVAL;
-	mutex_lock(&dev_priv->pcu_lock);
+	mutex_lock(&rps->lock);
 	if (val != rps->boost_freq) {
 		rps->boost_freq = val;
 		boost = atomic_read(&rps->num_waiters);
 	}
-	mutex_unlock(&dev_priv->pcu_lock);
+	mutex_unlock(&rps->lock);
 	if (boost)
 		schedule_work(&rps->work);
@@ -364,17 +362,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		return ret;
 	wakeref = intel_runtime_pm_get(dev_priv);
-	mutex_lock(&dev_priv->pcu_lock);
+	mutex_lock(&rps->lock);
 	val = intel_freq_opcode(dev_priv, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val < rps->min_freq_softlimit) {
-		mutex_unlock(&dev_priv->pcu_lock);
-		intel_runtime_pm_put(dev_priv, wakeref);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 	if (val > rps->rp0_freq)
@@ -392,8 +387,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	 * frequency request may be unchanged. */
 	ret = intel_set_rps(dev_priv, val);
-	mutex_unlock(&dev_priv->pcu_lock);
+unlock:
+	mutex_unlock(&rps->lock);
 	intel_runtime_pm_put(dev_priv, wakeref);
 	return ret ?: count;
@@ -423,17 +418,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		return ret;
 	wakeref = intel_runtime_pm_get(dev_priv);
-	mutex_lock(&dev_priv->pcu_lock);
+	mutex_lock(&rps->lock);
 	val = intel_freq_opcode(dev_priv, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val > rps->max_freq_softlimit) {
-		mutex_unlock(&dev_priv->pcu_lock);
-		intel_runtime_pm_put(dev_priv, wakeref);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 	rps->min_freq_softlimit = val;
@@ -447,8 +439,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	 * frequency request may be unchanged. */
 	ret = intel_set_rps(dev_priv, val);
-	mutex_unlock(&dev_priv->pcu_lock);
+unlock:
+	mutex_unlock(&rps->lock);
 	intel_runtime_pm_put(dev_priv, wakeref);
 	return ret ?: count;
...
@@ -464,7 +464,6 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
 {
 	u32 val;
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_iosf_sb_get(dev_priv,
 			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
@@ -477,7 +476,6 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
 	vlv_iosf_sb_put(dev_priv,
 			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (IS_VALLEYVIEW(dev_priv))
 		cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -556,7 +554,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 			BIT(VLV_IOSF_SB_BUNIT) |
 			BIT(VLV_IOSF_SB_PUNIT));
-	mutex_lock(&dev_priv->pcu_lock);
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	val &= ~DSPFREQGUAR_MASK;
 	val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -566,7 +563,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 		     50)) {
 		DRM_ERROR("timed out waiting for CDclk change\n");
 	}
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (cdclk == 400000) {
 		u32 divider;
@@ -639,7 +635,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 	 */
 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_punit_get(dev_priv);
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 	val &= ~DSPFREQGUAR_MASK_CHV;
@@ -652,7 +647,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 	}
 	vlv_punit_put(dev_priv);
-	mutex_unlock(&dev_priv->pcu_lock);
 	intel_update_cdclk(dev_priv);
@@ -731,10 +725,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
 		 "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
-	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_write(dev_priv,
 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("failed to inform pcode about cdclk change\n");
 		return;
@@ -783,10 +775,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
 			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
 		DRM_ERROR("Switching back to LCPLL failed\n");
-	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
 				cdclk_state->voltage_level);
-	mutex_unlock(&dev_priv->pcu_lock);
 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1025,12 +1015,10 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 	 */
 	WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
-	mutex_lock(&dev_priv->pcu_lock);
 	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				SKL_CDCLK_PREPARE_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
 			  ret);
@@ -1094,10 +1082,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 	POSTING_READ(CDCLK_CTL);
 	/* inform PCU of the change */
-	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				cdclk_state->voltage_level);
-	mutex_unlock(&dev_priv->pcu_lock);
 	intel_update_cdclk(dev_priv);
 }
@@ -1394,12 +1380,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	 * requires us to wait up to 150usec, but that leads to timeouts;
 	 * the 2ms used here is based on experiment.
 	 */
-	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_write_timeout(dev_priv,
 					      HSW_PCODE_DE_WRITE_FREQ_REQ,
 					      0x80000000, 150, 2);
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
 			  ret, cdclk);
@@ -1429,7 +1412,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	if (pipe != INVALID_PIPE)
 		intel_wait_for_vblank(dev_priv, pipe);
-	mutex_lock(&dev_priv->pcu_lock);
 	/*
 	 * The timeout isn't specified, the 2ms used here is based on
 	 * experiment.
@@ -1439,8 +1421,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	ret = sandybridge_pcode_write_timeout(dev_priv,
 					      HSW_PCODE_DE_WRITE_FREQ_REQ,
 					      cdclk_state->voltage_level, 150, 2);
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
 			  ret, cdclk);
@@ -1663,12 +1643,10 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
 	u32 val, divider;
 	int ret;
-	mutex_lock(&dev_priv->pcu_lock);
 	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				SKL_CDCLK_PREPARE_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
 			  ret);
@@ -1707,10 +1685,8 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
 		intel_wait_for_vblank(dev_priv, pipe);
 	/* inform PCU of the change */
-	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				cdclk_state->voltage_level);
-	mutex_unlock(&dev_priv->pcu_lock);
 	intel_update_cdclk(dev_priv);
@@ -1849,12 +1825,10 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
 	unsigned int vco = cdclk_state->vco;
 	int ret;
-	mutex_lock(&dev_priv->pcu_lock);
 	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				SKL_CDCLK_PREPARE_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
 			  ret);
@@ -1876,10 +1850,8 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
 	I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
 			      skl_cdclk_decimal(cdclk));
-	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				cdclk_state->voltage_level);
-	mutex_unlock(&dev_priv->pcu_lock);
 	intel_update_cdclk(dev_priv);
...
@@ -5331,10 +5331,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
 	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
 	if (IS_BROADWELL(dev_priv)) {
-		mutex_lock(&dev_priv->pcu_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
 						IPS_ENABLE | IPS_PCODE_CONTROL));
-		mutex_unlock(&dev_priv->pcu_lock);
 		/* Quoting Art Runyan: "its not safe to expect any particular
 		 * value in IPS_CTL bit 31 after enabling IPS through the
 		 * mailbox." Moreover, the mailbox may return a bogus state,
@@ -5364,9 +5362,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
 		return;
 	if (IS_BROADWELL(dev_priv)) {
-		mutex_lock(&dev_priv->pcu_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
-		mutex_unlock(&dev_priv->pcu_lock);
 		/*
 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
 		 * 42ms timeout value leads to occasional timeouts so use 100ms
@@ -9506,11 +9502,9 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
 {
 	if (IS_HASWELL(dev_priv)) {
-		mutex_lock(&dev_priv->pcu_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
 			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
-		mutex_unlock(&dev_priv->pcu_lock);
 	} else {
 		I915_WRITE(D_COMP_BDW, val);
 		POSTING_READ(D_COMP_BDW);
...
@@ -213,10 +213,8 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
 	 */
 	if (IS_GEN9_BC(dev_priv)) {
-		mutex_lock(&dev_priv->pcu_lock);
 		ret = sandybridge_pcode_write(dev_priv,
 					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
-		mutex_unlock(&dev_priv->pcu_lock);
 		if (ret) {
 			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
 				  ret);
...
This diff is collapsed.
@@ -1211,7 +1211,6 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_punit_get(dev_priv);
 #define COND \
@@ -1234,7 +1233,6 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 out:
 	vlv_punit_put(dev_priv);
-	mutex_unlock(&dev_priv->pcu_lock);
 }
 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1261,7 +1259,6 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 	mask = PUNIT_PWRGT_MASK(pw_idx);
 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_punit_get(dev_priv);
 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
@@ -1282,7 +1279,6 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 	WARN_ON(ctrl != state);
 	vlv_punit_put(dev_priv);
-	mutex_unlock(&dev_priv->pcu_lock);
 	return enabled;
 }
@@ -1768,7 +1764,6 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 	bool enabled;
 	u32 state, ctrl;
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_punit_get(dev_priv);
 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
@@ -1787,7 +1782,6 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 	WARN_ON(ctrl << 16 != state);
 	vlv_punit_put(dev_priv);
-	mutex_unlock(&dev_priv->pcu_lock);
 	return enabled;
 }
@@ -1802,7 +1796,6 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_punit_get(dev_priv);
 #define COND \
@@ -1825,7 +1818,6 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 out:
 	vlv_punit_put(dev_priv);
-	mutex_unlock(&dev_priv->pcu_lock);
 }
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -4019,11 +4011,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
 {
 	bool ret;
-	mutex_lock(&dev_priv->pcu_lock);
 	vlv_punit_get(dev_priv);
 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
 	vlv_punit_put(dev_priv);
-	mutex_unlock(&dev_priv->pcu_lock);
 	return ret;
 }
...
@@ -143,8 +143,6 @@ u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
 {
 	u32 val = 0;
-	lockdep_assert_held(&i915->pcu_lock);
 	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
 			SB_CRRDDA_NP, addr, &val);
@@ -153,8 +151,6 @@ u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
 int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
 {
-	lockdep_assert_held(&i915->pcu_lock);
 	return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
 			       SB_CRWRDA_NP, addr, &val);
 }
...