Commit ce453d81 authored by Chris Wilson

drm/i915: Use a device flag for non-interruptible phases

The code paths for modesetting are growing in complexity, as we may need
to move buffers around in order to fit the scanout in the aperture. We
therefore face a choice: either thread the interruptible status through
the entire pinning and unbinding code paths, or set a flag on the device
for the phases during which we may not be interrupted by a signal. This
patch does the latter, and so fixes a few instances of modesetting
failures under stress.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 8408c282
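
The pattern the patch adopts is simple: a modesetting path clears
dev_priv->mm.interruptible before it starts pinning or unbinding, and
restores it on every exit path, so that a callee deep in the chain
(ultimately i915_wait_request()) picks the uninterruptible wait without
an extra parameter threaded through each signature. Below is a minimal
sketch of that discipline; example_noninterruptible_phase() is a
hypothetical helper for illustration only, not part of the patch — the
real call sites are intel_pin_and_fence_fb_obj() and
intel_crtc_dpms_overlay() in the diff that follows, and the flag is
flipped with dev->struct_mutex held, as at those call sites.

	/* Hypothetical helper, for illustration only: clear the
	 * device-wide flag for the duration of the non-interruptible
	 * phase and restore it on every exit path, including errors.
	 * Assumed to be called with dev->struct_mutex held.
	 */
	static int example_noninterruptible_phase(struct drm_device *dev,
						  struct drm_i915_gem_object *obj,
						  u32 alignment)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int ret;

		dev_priv->mm.interruptible = false;	/* enter the phase */

		ret = i915_gem_object_pin(obj, alignment, true);
		if (ret)
			goto out;

		/* ... further work that may wait on the GPU ... */

	out:
		dev_priv->mm.interruptible = true;	/* leave the phase */
		return ret;
	}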
@@ -616,6 +616,12 @@ typedef struct drm_i915_private {
	 */
	struct delayed_work retire_work;

+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
@@ -1110,8 +1116,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);

 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-						bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring,
				    u32 seqno);
@@ -1133,8 +1138,7 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 }

 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-					   struct intel_ring_buffer *pipelined,
-					   bool interruptible);
+					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

 void i915_gem_retire_requests(struct drm_device *dev);
@@ -1143,8 +1147,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
					    uint32_t read_domains,
					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-					   bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1157,8 +1160,7 @@ int __must_check i915_add_request(struct intel_ring_buffer *ring,
				  struct drm_file *file,
				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno,
-				   bool interruptible);
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
...
@@ -1200,7 +1200,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
		if (obj->tiling_mode == I915_TILING_NONE)
			ret = i915_gem_object_put_fence(obj);
		else
-			ret = i915_gem_object_get_fence(obj, NULL, true);
+			ret = i915_gem_object_get_fence(obj, NULL);
		if (ret)
			goto unlock;
@@ -1989,8 +1989,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno,
-		  bool interruptible)
+		  uint32_t seqno)
 {
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 ier;
@@ -2043,7 +2042,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
		ring->waiting_seqno = seqno;
		if (ring->irq_get(ring)) {
-			if (interruptible)
+			if (dev_priv->mm.interruptible)
				ret = wait_event_interruptible(ring->irq_queue,
					i915_seqno_passed(ring->get_seqno(ring), seqno)
					|| atomic_read(&dev_priv->mm.wedged));
@@ -2085,8 +2084,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
  * safe to unbind from the GTT or access from the CPU.
  */
 int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
	int ret;
@@ -2099,9 +2097,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
	 * it.
	 */
	if (obj->active) {
-		ret = i915_wait_request(obj->ring,
-					obj->last_rendering_seqno,
-					interruptible);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
		if (ret)
			return ret;
	}
@@ -2202,9 +2198,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
			return ret;
	}

-	return i915_wait_request(ring,
-				 i915_gem_next_request_seqno(ring),
-				 true);
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }

 int
@@ -2405,8 +2399,7 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-			    struct intel_ring_buffer *pipelined,
-			    bool interruptible)
+			    struct intel_ring_buffer *pipelined)
 {
	int ret;
@@ -2425,9 +2418,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
		if (!ring_passed_seqno(obj->last_fenced_ring,
				       obj->last_fenced_seqno)) {
			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno,
-						interruptible);
+						obj->last_fenced_seqno);
			if (ret)
				return ret;
		}
@@ -2453,7 +2444,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

-	ret = i915_gem_object_flush_fence(obj, NULL, true);
+	ret = i915_gem_object_flush_fence(obj, NULL);
	if (ret)
		return ret;
@@ -2530,8 +2521,7 @@ i915_find_fence_reg(struct drm_device *dev,
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-			  struct intel_ring_buffer *pipelined,
-			  bool interruptible)
+			  struct intel_ring_buffer *pipelined)
 {
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2554,8 +2544,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
			if (!ring_passed_seqno(obj->last_fenced_ring,
					       reg->setup_seqno)) {
				ret = i915_wait_request(obj->last_fenced_ring,
-							reg->setup_seqno,
-							interruptible);
+							reg->setup_seqno);
				if (ret)
					return ret;
			}
@@ -2564,9 +2553,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
		}
	} else if (obj->last_fenced_ring &&
		   obj->last_fenced_ring != pipelined) {
-		ret = i915_gem_object_flush_fence(obj,
-						  pipelined,
-						  interruptible);
+		ret = i915_gem_object_flush_fence(obj, pipelined);
		if (ret)
			return ret;
	} else if (obj->tiling_changed) {
@@ -2603,7 +2590,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
	if (reg == NULL)
		return -ENOSPC;

-	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+	ret = i915_gem_object_flush_fence(obj, pipelined);
	if (ret)
		return ret;
@@ -2615,9 +2602,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
		if (old->tiling_mode)
			i915_gem_release_mmap(old);

-		ret = i915_gem_object_flush_fence(old,
-						  pipelined,
-						  interruptible);
+		ret = i915_gem_object_flush_fence(old, pipelined);
		if (ret) {
			drm_gem_object_unreference(&old->base);
			return ret;
@@ -2940,7 +2925,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
		return ret;

	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
+		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}
@@ -2990,7 +2975,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
	/* Currently, we are always called from an non-interruptible context. */
	if (pipelined != obj->ring) {
-		ret = i915_gem_object_wait_rendering(obj, false);
+		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}
@@ -3008,8 +2993,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }

 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-			  bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 {
	int ret;
@@ -3022,7 +3006,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
			return ret;
	}

-	return i915_gem_object_wait_rendering(obj, interruptible);
+	return i915_gem_object_wait_rendering(obj);
 }

 /**
@@ -3044,7 +3028,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
	if (ret)
		return ret;

-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait_rendering(obj);
	if (ret)
		return ret;
@@ -3142,7 +3126,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait_rendering(obj);
	if (ret)
		return ret;
@@ -3842,6 +3826,8 @@ i915_gem_load(struct drm_device *dev)
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

+	dev_priv->mm.interruptible = true;
+
	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
...
@@ -560,7 +560,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			if (has_fenced_gpu_access) {
				if (need_fence) {
-					ret = i915_gem_object_get_fence(obj, ring, 1);
+					ret = i915_gem_object_get_fence(obj, ring);
					if (ret)
						break;
				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -756,7 +756,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj, true);
+		return i915_gem_object_wait_rendering(obj);

	idx = intel_ring_sync_index(from, to);
...
@@ -2067,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;
@@ -2091,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
		BUG();
	}

+	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin(obj, alignment, true);
	if (ret)
-		return ret;
+		goto err_interruptible;

	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
	if (ret)
@@ -2105,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence(obj, pipelined, false);
+		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;
	}

+	dev_priv->mm.interruptible = true;
	return 0;

 err_unpin:
	i915_gem_object_unpin(obj);
+err_interruptible:
+	dev_priv->mm.interruptible = true;
	return ret;
 }
@@ -2247,7 +2252,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
-		ret = i915_gem_object_flush_gpu(obj, false);
+		ret = i915_gem_object_flush_gpu(obj);
		(void) ret;
	}
@@ -2994,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
-		(void) intel_overlay_switch_off(intel_crtc->overlay, false);
+		dev_priv->mm.interruptible = false;
+		(void) intel_overlay_switch_off(intel_crtc->overlay);
+		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}
...
@@ -329,8 +329,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
-				    bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
...
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
					 struct drm_i915_gem_request *request,
-					 bool interruptible,
					 void (*tail)(struct intel_overlay *))
 {
	struct drm_device *dev = overlay->dev;
@@ -228,8 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
	}
	overlay->last_flip_req = request->seqno;
	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv),
-				overlay->last_flip_req, true);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
	if (ret)
		return ret;
@@ -321,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();

-	ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
	if (pipe_a_quirk)
		i830_deactivate_pipe_a(dev);
@@ -400,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 }

 /* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
-			     bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
 {
	struct drm_device *dev = overlay->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
	ADVANCE_LP_RING();

-	return intel_overlay_do_wait_request(overlay, request, interruptible,
+	return intel_overlay_do_wait_request(overlay, request,
					     intel_overlay_off_tail);
 }

 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-						bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
	struct drm_device *dev = overlay->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -452,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
	if (overlay->last_flip_req == 0)
		return 0;

-	ret = i915_wait_request(LP_RING(dev_priv),
-				overlay->last_flip_req, interruptible);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
	if (ret)
		return ret;
@@ -498,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();

-	ret = intel_overlay_do_wait_request(overlay, request, true,
+	ret = intel_overlay_do_wait_request(overlay, request,
					    intel_overlay_release_old_vid_tail);
	if (ret)
		return ret;
@@ -867,8 +862,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
	return ret;
 }

-int intel_overlay_switch_off(struct intel_overlay *overlay,
-			     bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
	struct overlay_registers *regs;
	struct drm_device *dev = overlay->dev;
@@ -877,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));

-	ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+	ret = intel_overlay_recover_from_interrupt(overlay);
	if (ret != 0)
		return ret;
@@ -892,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
	regs->OCMD = 0;
	intel_overlay_unmap_regs(overlay, regs);

-	ret = intel_overlay_off(overlay, interruptible);
+	ret = intel_overlay_off(overlay);
	if (ret != 0)
		return ret;
@@ -1134,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
		mutex_lock(&dev->mode_config.mutex);
		mutex_lock(&dev->struct_mutex);

-		ret = intel_overlay_switch_off(overlay, true);
+		ret = intel_overlay_switch_off(overlay);

		mutex_unlock(&dev->struct_mutex);
		mutex_unlock(&dev->mode_config.mutex);
@@ -1170,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
		goto out_unlock;
	}

-	ret = intel_overlay_recover_from_interrupt(overlay, true);
+	ret = intel_overlay_recover_from_interrupt(overlay);
	if (ret != 0)
		goto out_unlock;

	if (overlay->crtc != crtc) {
		struct drm_display_mode *mode = &crtc->base.mode;
-		ret = intel_overlay_switch_off(overlay, true);
+		ret = intel_overlay_switch_off(overlay);
		if (ret != 0)
			goto out_unlock;
...