Commit b7137e0c authored by Chris Wilson

drm/i915: Defer enabling rc6 til after we submit the first batch/context

Some hardware requires a valid render context before it can initiate
rc6 power gating of the GPU; the default state of the GPU is not
sufficient and may lead to undefined behaviour. The first execution of
any batch will load the "golden render state", at which point it is safe
to enable rc6. As we do not forcibly load the kernel context at resume,
we have to hook into batch submission to be sure that the render
state is set up before enabling rc6.
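
As a rough, hedged sketch of that hook (simplified from the patch below;
the real i915_gem_mark_busy() takes the engine and does more bookkeeping
than shown here):

/*
 * Simplified sketch: rc6/RPS enabling moves into the submission path,
 * where the request being queued guarantees that the golden render
 * state is about to be loaded.
 */
static void i915_gem_mark_busy(struct drm_i915_private *dev_priv)
{
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	/* Safe now: a batch is in flight, so the render context will be valid. */
	intel_enable_gt_powersave(dev_priv);
}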

However, since we do not enable powersaving until that first batch
arrives, we also queue a delayed task that submits a request on the
kernel context itself, guaranteeing that the render state is loaded
(and rc6 enabled) even if userspace never submits a batch.
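
A hedged sketch of that fallback, using the names introduced by this
patch (the request construction is condensed into a hypothetical
submit_kernel_context_request() helper):

/*
 * Simplified sketch: if nothing has been submitted by the time the
 * delayed work fires, submit a request on the kernel context ourselves
 * so the render state is loaded and rc6 can be enabled.
 */
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);

	if (!READ_ONCE(dev_priv->rps.enabled))
		submit_kernel_context_request(dev_priv); /* hypothetical helper */

	intel_runtime_pm_put(dev_priv); /* balances the wakeref taken when queued */
}

The work is queued with roughly a one-second delay from init/resume via
intel_autoenable_gt_powersave(), taking a runtime-pm wakeref that the
worker releases when it completes.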

v2: Rearrange intel_disable_gt_powersave() to match.
v3: Apply user specified cur_freq (or idle_freq if not set).
v4: Give in, and supply a delayed work to autoenable rc6
v5: Mika suggested a couple of better names for delayed_resume_work
v6: Rebalance rpm_put around the autoenable task
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1468397438-21226-7-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
parent 62e1baa1
@@ -1343,7 +1343,7 @@ void i915_driver_unload(struct drm_device *dev)
i915_destroy_error_state(dev);
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
i915_gem_fini(dev);
@@ -1458,8 +1458,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);
@@ -1652,6 +1650,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1778,8 +1777,6 @@ int i915_reset(struct drm_i915_private *dev_priv)
unsigned reset_counter;
int ret;
intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
@@ -1835,8 +1832,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
* previous concerns that it doesn't respond well to some forms
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev_priv);
intel_autoenable_gt_powersave(dev_priv);
return 0;
@@ -2459,7 +2455,6 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1188,7 +1188,7 @@ struct intel_gen6_power_mgmt {
bool client_boost;
bool enabled;
struct delayed_work delayed_resume_work;
struct delayed_work autoenable_work;
unsigned boosts;
struct intel_rps_client semaphores, mmioflips;
@@ -2842,6 +2842,7 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;
intel_enable_gt_powersave(dev_priv);
i915_update_gfx_val(dev_priv);
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_busy(dev_priv);
@@ -4979,6 +4980,8 @@ i915_gem_suspend(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
@@ -15456,7 +15456,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_init_clock_gating(dev);
intel_enable_gt_powersave(dev_priv);
}
/*
@@ -1690,10 +1690,11 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
@@ -6536,6 +6536,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
mutex_unlock(&dev_priv->rps.hw_lock);
intel_autoenable_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6549,13 +6551,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
{
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
gen6_disable_rps_interrupts(dev_priv);
}
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev_priv: i915 device
@@ -6569,50 +6564,63 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
gen6_suspend_rps(dev_priv);
if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
intel_runtime_pm_put(dev_priv);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
/* gen6_rps_idle() will be called later to disable interrupts */
}
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
dev_priv->rps.enabled = true; /* force disabling */
intel_disable_gt_powersave(dev_priv);
gen6_reset_rps_interrupts(dev_priv);
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
if (IS_IRONLAKE_M(dev_priv)) {
ironlake_disable_drps(dev_priv);
} else if (INTEL_INFO(dev_priv)->gen >= 6) {
intel_suspend_gt_powersave(dev_priv);
if (!READ_ONCE(dev_priv->rps.enabled))
return;
mutex_lock(&dev_priv->rps.hw_lock);
if (INTEL_INFO(dev_priv)->gen >= 9) {
gen9_disable_rc6(dev_priv);
gen9_disable_rps(dev_priv);
} else if (IS_CHERRYVIEW(dev_priv))
cherryview_disable_rps(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_disable_rps(dev_priv);
else
gen6_disable_rps(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
if (INTEL_GEN(dev_priv) >= 9) {
gen9_disable_rc6(dev_priv);
gen9_disable_rps(dev_priv);
} else if (IS_CHERRYVIEW(dev_priv)) {
cherryview_disable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_disable_rps(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 6) {
gen6_disable_rps(dev_priv);
} else if (IS_IRONLAKE_M(dev_priv)) {
ironlake_disable_drps(dev_priv);
}
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
static void intel_gen6_powersave_work(struct work_struct *work)
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
rps.delayed_resume_work.work);
/* We shouldn't be disabling as we submit, so this should be less
* racy than it appears!
*/
if (READ_ONCE(dev_priv->rps.enabled))
return;
mutex_lock(&dev_priv->rps.hw_lock);
/* Powersaving is controlled by the host when inside a VM */
if (intel_vgpu_active(dev_priv))
return;
gen6_reset_rps_interrupts(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_CHERRYVIEW(dev_priv)) {
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
} else if (INTEL_INFO(dev_priv)->gen >= 9) {
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
@@ -6620,9 +6628,12 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
__gen6_update_ring_freq(dev_priv);
} else {
} else if (INTEL_GEN(dev_priv) >= 6) {
gen6_enable_rps(dev_priv);
__gen6_update_ring_freq(dev_priv);
} else if (IS_IRONLAKE_M(dev_priv)) {
ironlake_enable_drps(dev_priv);
intel_init_emon(dev_priv);
}
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6632,18 +6643,47 @@ static void intel_gen6_powersave_work(struct work_struct *work)
WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
dev_priv->rps.enabled = true;
mutex_unlock(&dev_priv->rps.hw_lock);
}
gen6_enable_rps_interrupts(dev_priv);
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
struct intel_engine_cs *rcs;
struct drm_i915_gem_request *req;
mutex_unlock(&dev_priv->rps.hw_lock);
if (READ_ONCE(dev_priv->rps.enabled))
goto out;
rcs = &dev_priv->engine[RCS];
if (rcs->last_context)
goto out;
if (!rcs->init_context)
goto out;
mutex_lock(&dev_priv->drm.struct_mutex);
req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
if (IS_ERR(req))
goto unlock;
if (!i915.enable_execlists && i915_switch_context(req) == 0)
rcs->init_context(req);
/* Mark the device busy, calling intel_enable_gt_powersave() */
i915_add_request_no_flush(req);
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out:
intel_runtime_pm_put(dev_priv);
}
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
/* Powersaving is controlled by the host when inside a VM */
if (intel_vgpu_active(dev_priv))
if (READ_ONCE(dev_priv->rps.enabled))
return;
if (IS_IRONLAKE_M(dev_priv)) {
@@ -6664,21 +6704,13 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
* paths, so the _noresume version is enough (and in case of
* runtime resume it's necessary).
*/
if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
round_jiffies_up_relative(HZ)))
if (queue_delayed_work(dev_priv->wq,
&dev_priv->rps.autoenable_work,
round_jiffies_up_relative(HZ)))
intel_runtime_pm_get_noresume(dev_priv);
}
}
void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen < 6)
return;
gen6_suspend_rps(dev_priv);
dev_priv->rps.enabled = false;
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -7785,8 +7817,8 @@ void intel_pm_setup(struct drm_device *dev)
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
__intel_autoenable_gt_powersave);
INIT_LIST_HEAD(&dev_priv->rps.clients);
INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
@@ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev_priv);
intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,