Commit 5008e874 authored by Maarten Lankhorst

drm/i915: Make wait_for_flips interruptible.

Move the wait from intel_crtc_atomic_commit to prepare_plane_fb.
The waiting is done before committing, because afterwards it is
too late to undo the changes.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Ander Conselvan De Oliveira <ander.conselvan.de.oliveira@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 3abc4e09
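The ordering this change enforces, as a minimal standalone C sketch (all names below are illustrative stand-ins, not the kernel functions touched by the diff): every wait that can be interrupted by a signal runs in a prepare phase that is still allowed to fail and return to userspace; only after the prepare phase succeeds does the commit phase touch the hardware.

#include <stdio.h>

/* Stand-in for wait_event_interruptible_timeout(); in the kernel this
 * may return a negative error (e.g. -ERESTARTSYS) when a signal arrives. */
static int wait_for_pending_flips(void)
{
	return 0;
}

/* Prepare phase: all interruptible waits happen here, while the
 * operation can still be aborted without touching the hardware. */
static int prepare_commit(void)
{
	return wait_for_pending_flips();
}

/* Commit phase: the point of no return, must not fail. */
static void commit_hw_state(void)
{
	puts("committing hardware state");
}

int main(void)
{
	if (prepare_commit())
		return 1;	/* the real ioctl would simply be restarted */
	commit_hw_state();
	return 0;
}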
@@ -206,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 		 * but since this plane is unchanged just do the
 		 * minimum required validation.
 		 */
-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
-			intel_crtc->atomic.wait_for_flips = true;
 		crtc_state->base.planes_changed = true;
 	}
@@ -3272,32 +3272,6 @@ void intel_finish_reset(struct drm_device *dev)
 	drm_modeset_unlock_all(dev);
 }
 
-static void
-intel_finish_fb(struct drm_framebuffer *old_fb)
-{
-	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	bool was_interruptible = dev_priv->mm.interruptible;
-	int ret;
-
-	/* Big Hammer, we also need to ensure that any pending
-	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-	 * current scanout is retired before unpinning the old
-	 * framebuffer. Note that we rely on userspace rendering
-	 * into the buffer attached to the pipe they are waiting
-	 * on. If not, userspace generates a GPU hang with IPEHR
-	 * point to the MI_WAIT_FOR_EVENT.
-	 *
-	 * This should only fail upon a hung GPU, in which case we
-	 * can safely continue.
-	 */
-	dev_priv->mm.interruptible = false;
-	ret = i915_gem_object_wait_rendering(obj, true);
-	dev_priv->mm.interruptible = was_interruptible;
-
-	WARN_ON(ret);
-}
-
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3918,15 +3892,23 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 			 work->pending_flip_obj);
 }
 
-void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	long ret;
 
 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
-	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
-				       !intel_crtc_has_pending_flip(crtc),
-				       60*HZ) == 0)) {
+
+	ret = wait_event_interruptible_timeout(
+					dev_priv->pending_flip_queue,
+					!intel_crtc_has_pending_flip(crtc),
+					60*HZ);
+
+	if (ret < 0)
+		return ret;
+
+	if (ret == 0) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 		spin_lock_irq(&dev->event_lock);
@@ -3937,11 +3919,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		spin_unlock_irq(&dev->event_lock);
 	}
 
-	if (crtc->primary->fb) {
-		mutex_lock(&dev->struct_mutex);
-		intel_finish_fb(crtc->primary->fb);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	return 0;
 }
 
 /* Program iCLKIP clock to the desired frequency */
@@ -4797,9 +4775,6 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 
-	if (atomic->wait_for_flips)
-		intel_crtc_wait_for_pending_flips(&crtc->base);
-
 	if (atomic->disable_fbc)
 		intel_fbc_disable_crtc(crtc);
@@ -11678,7 +11653,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
-		intel_crtc->atomic.wait_for_flips = true;
 		intel_crtc->atomic.pre_disable_primary = turn_off;
 		intel_crtc->atomic.post_enable_primary = turn_on;
@@ -13172,6 +13146,30 @@ static int intel_atomic_check(struct drm_device *dev,
 	return 0;
 }
 
+static int intel_atomic_prepare_commit(struct drm_device *dev,
+				       struct drm_atomic_state *state,
+				       bool async)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	if (async) {
+		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+		return -EINVAL;
+	}
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		ret = intel_crtc_wait_for_pending_flips(crtc);
+		if (ret)
+			return ret;
+	}
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+
+	return ret;
+}
+
 /**
  * intel_atomic_commit - commit validated state object
  * @dev: DRM device
@@ -13199,12 +13197,7 @@ static int intel_atomic_commit(struct drm_device *dev,
 	int i;
 	bool any_ms = false;
 
-	if (async) {
-		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
+	ret = intel_atomic_prepare_commit(dev, state, async);
 	if (ret)
 		return ret;
@@ -13464,6 +13457,29 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
+	if (old_obj) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+
+		/* Big Hammer, we also need to ensure that any pending
+		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+		 * current scanout is retired before unpinning the old
+		 * framebuffer. Note that we rely on userspace rendering
+		 * into the buffer attached to the pipe they are waiting
+		 * on. If not, userspace generates a GPU hang with IPEHR
+		 * point to the MI_WAIT_FOR_EVENT.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
+		 */
+		if (needs_modeset(crtc_state))
+			ret = i915_gem_object_wait_rendering(old_obj, true);
+
+		/* Swallow -EIO errors to allow updates during hw lockup. */
+		if (ret && ret != -EIO)
+			goto out;
+	}
+
 	if (!obj) {
 		ret = 0;
 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13479,6 +13495,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	if (ret == 0)
 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
+out:
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
@@ -525,7 +525,6 @@ struct intel_mmio_flip {
  */
 struct intel_crtc_atomic_commit {
 	/* Sleepable operations to perform before commit */
-	bool wait_for_flips;
 	bool disable_fbc;
 	bool disable_ips;
 	bool disable_cxsr;
@@ -1190,7 +1189,6 @@ enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
-void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);