Commit 55d80d23 authored by Maarten Lankhorst's avatar Maarten Lankhorst

drm/i915: Remove stallcheck special handling, v3.

Both intel_unpin_work.pending and intel_unpin_work.enable_stall_check
were used to see if work should be enabled. By only using pending
some special cases are gone, and access to unpin_work can be simplified.

A flip could previously be queued before
stallcheck was active. With the addition of the pending member
enable_stall_check became obsolete and can thus be removed.

Use this to only access work members until intel_mark_page_flip_active
is called, or intel_queue_mmio_flip is used. This will prevent
use-after-free, and makes it easier to verify accesses.

Changes since v1:
- Reword commit message.
- Do not access unpin_work after intel_mark_page_flip_active.
- Add the right memory barriers.
Changes since v2:
- atomic_read() needs a full smp_rmb.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1463490484-19540-3-git-send-email-maarten.lankhorst@linux.intel.com
Reviewed-by: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
parent af61d5ce
...@@ -615,9 +615,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) ...@@ -615,9 +615,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "No flip due on pipe %c (plane %c)\n", seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane); pipe, plane);
} else { } else {
u32 pending;
u32 addr; u32 addr;
if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { pending = atomic_read(&work->pending);
if (pending == INTEL_FLIP_INACTIVE) {
seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
pipe, plane);
} else if (pending >= INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n", seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane); pipe, plane);
} else { } else {
...@@ -639,10 +644,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) ...@@ -639,10 +644,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
work->flip_queued_vblank, work->flip_queued_vblank,
work->flip_ready_vblank, work->flip_ready_vblank,
drm_crtc_vblank_count(&crtc->base)); drm_crtc_vblank_count(&crtc->base));
if (work->enable_stall_check)
seq_puts(m, "Stall check enabled, ");
else
seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4) if (INTEL_INFO(dev)->gen >= 4)
......
...@@ -3813,8 +3813,6 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) ...@@ -3813,8 +3813,6 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_unpin_work *work = intel_crtc->unpin_work; struct intel_unpin_work *work = intel_crtc->unpin_work;
/* ensure that the unpin work is consistent wrt ->pending. */
smp_rmb();
intel_crtc->unpin_work = NULL; intel_crtc->unpin_work = NULL;
if (work->event) if (work->event)
...@@ -10890,15 +10888,12 @@ static void do_intel_finish_page_flip(struct drm_i915_private *dev_priv, ...@@ -10890,15 +10888,12 @@ static void do_intel_finish_page_flip(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev->event_lock, flags); spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work; work = intel_crtc->unpin_work;
/* Ensure we don't miss a work->pending update ... */ if (work && atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) {
/* ensure that the unpin work is consistent wrt ->pending. */
smp_rmb(); smp_rmb();
if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
page_flip_completed(intel_crtc); page_flip_completed(intel_crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags); spin_unlock_irqrestore(&dev->event_lock, flags);
} }
...@@ -10995,10 +10990,8 @@ void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane) ...@@ -10995,10 +10990,8 @@ void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane)
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{ {
/* Ensure that the work item is consistent when activating it ... */ /* Ensure that the work item is consistent when activating it ... */
smp_wmb(); smp_mb__before_atomic();
atomic_set(&work->pending, INTEL_FLIP_PENDING); atomic_set(&work->pending, INTEL_FLIP_PENDING);
/* and that it is marked active as soon as the irq could fire. */
smp_wmb();
} }
static int intel_gen2_queue_flip(struct drm_device *dev, static int intel_gen2_queue_flip(struct drm_device *dev,
...@@ -11032,7 +11025,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev, ...@@ -11032,7 +11025,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */ intel_ring_emit(engine, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0; return 0;
} }
...@@ -11064,7 +11056,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev, ...@@ -11064,7 +11056,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_NOOP);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0; return 0;
} }
...@@ -11103,7 +11094,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev, ...@@ -11103,7 +11094,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc); intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0; return 0;
} }
...@@ -11139,7 +11129,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev, ...@@ -11139,7 +11129,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc); intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0; return 0;
} }
...@@ -11234,7 +11223,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev, ...@@ -11234,7 +11223,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP)); intel_ring_emit(engine, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0; return 0;
} }
...@@ -11361,8 +11349,6 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) ...@@ -11361,8 +11349,6 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
if (work == NULL) if (work == NULL)
return; return;
intel_mark_page_flip_active(work);
intel_pipe_update_start(crtc); intel_pipe_update_start(crtc);
if (INTEL_INFO(mmio_flip->i915)->gen >= 9) if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
...@@ -11372,6 +11358,8 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) ...@@ -11372,6 +11358,8 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
ilk_do_mmio_flip(crtc, work); ilk_do_mmio_flip(crtc, work);
intel_pipe_update_end(crtc); intel_pipe_update_end(crtc);
intel_mark_page_flip_active(work);
} }
static void intel_mmio_flip_work_func(struct work_struct *work) static void intel_mmio_flip_work_func(struct work_struct *work)
...@@ -11437,15 +11425,14 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, ...@@ -11437,15 +11425,14 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work = intel_crtc->unpin_work; struct intel_unpin_work *work = intel_crtc->unpin_work;
u32 addr; u32 addr;
u32 pending;
if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) pending = atomic_read(&work->pending);
return true; /* ensure that the unpin work is consistent wrt ->pending. */
smp_rmb();
if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
return false;
if (!work->enable_stall_check) if (pending != INTEL_FLIP_PENDING)
return false; return pending == INTEL_FLIP_COMPLETE;
if (work->flip_ready_vblank == 0) { if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req && if (work->flip_queued_req &&
...@@ -11626,6 +11613,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -11626,6 +11613,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
*/ */
if (!mmio_flip) { if (!mmio_flip) {
ret = i915_gem_object_sync(obj, engine, &request); ret = i915_gem_object_sync(obj, engine, &request);
if (!ret && !request) {
request = i915_gem_request_alloc(engine, NULL);
ret = PTR_ERR_OR_ZERO(request);
}
if (ret) if (ret)
goto cleanup_pending; goto cleanup_pending;
} }
...@@ -11639,36 +11631,29 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -11639,36 +11631,29 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset += intel_crtc->dspaddr_offset; work->gtt_offset += intel_crtc->dspaddr_offset;
if (mmio_flip) { if (mmio_flip) {
ret = intel_queue_mmio_flip(dev, crtc, obj); work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
if (ret)
goto cleanup_unpin;
i915_gem_request_assign(&work->flip_queued_req, i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req); obj->last_write_req);
} else {
if (!request) {
request = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
}
}
ret = intel_queue_mmio_flip(dev, crtc, obj);
if (ret)
goto cleanup_unpin;
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags); page_flip_flags);
if (ret) if (ret)
goto cleanup_unpin; goto cleanup_unpin;
i915_gem_request_assign(&work->flip_queued_req, request); i915_gem_request_assign(&work->flip_queued_req, request);
}
if (request)
i915_add_request_no_flush(request);
work->flip_queued_vblank = drm_crtc_vblank_count(crtc); work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true; intel_mark_page_flip_active(work);
i915_add_request_no_flush(request);
}
i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit); to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
......
...@@ -992,7 +992,6 @@ struct intel_unpin_work { ...@@ -992,7 +992,6 @@ struct intel_unpin_work {
struct drm_i915_gem_request *flip_queued_req; struct drm_i915_gem_request *flip_queued_req;
u32 flip_queued_vblank; u32 flip_queued_vblank;
u32 flip_ready_vblank; u32 flip_ready_vblank;
bool enable_stall_check;
}; };
struct intel_load_detect_pipe { struct intel_load_detect_pipe {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment