Commit c004a90b authored by Chris Wilson

drm/i915: Restore nonblocking awaits for modesetting

After combining the dma-buf reservation object and the GEM reservation
object, we lost the ability to do a nonblocking wait on the i915 request
(as we blocked upon the reservation object during prepare_fb). We can
instead convert the reservation object into a fence upon which we can
asynchronously wait (including a forced timeout in case the DMA fence is
never signaled).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-22-chris@chris-wilson.co.uk
parent d07f0e59
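
The mechanism the patch builds on is a software fence that counts outstanding dependencies: intel_prepare_plane_fb() registers each reservation-object or dma-fence dependency against intel_state->commit_ready, and once i915_sw_fence_commit() drops the initial bias, the last dependency to signal runs the notify callback that queues the commit worker. Below is a minimal userspace C sketch of that countdown pattern, using pthreads in place of the kernel's i915_sw_fence machinery; all names (sw_fence, sw_fence_await, render_job) are hypothetical, and this illustrates the idea rather than the kernel implementation:

/* Userspace analogue of an i915_sw_fence: one bias reference plus one
 * reference per awaited dependency; whichever thread drops the count
 * to zero runs the notify callback. All names here are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct sw_fence {
	atomic_int pending;                 /* bias + outstanding awaits */
	void (*notify)(struct sw_fence *);  /* fired when pending hits 0 */
};

static void sw_fence_init(struct sw_fence *f,
			  void (*notify)(struct sw_fence *))
{
	atomic_init(&f->pending, 1);        /* bias, held until commit */
	f->notify = notify;
}

static void sw_fence_signal(struct sw_fence *f)
{
	if (atomic_fetch_sub(&f->pending, 1) == 1)
		f->notify(f);               /* last reference dropped */
}

static void sw_fence_await(struct sw_fence *f)
{
	atomic_fetch_add(&f->pending, 1);   /* one more dependency */
}

static void sw_fence_commit(struct sw_fence *f)
{
	sw_fence_signal(f);                 /* drop the bias; may fire now */
}

static void commit_tail(struct sw_fence *f)
{
	(void)f;
	puts("all dependencies signaled: run commit tail");
}

static void *render_job(void *arg)
{
	usleep(10000);                      /* pretend the GPU is busy */
	sw_fence_signal(arg);               /* the dma-fence "fires" */
	return NULL;
}

int main(void)
{
	struct sw_fence fence;
	pthread_t t;

	sw_fence_init(&fence, commit_tail);
	sw_fence_await(&fence);             /* prepare_fb collects a wait */
	pthread_create(&t, NULL, render_job, &fence);
	sw_fence_commit(&fence);            /* nonblocking: returns at once */
	pthread_join(t, NULL);
	return 0;
}

Compiled with "cc -pthread", the sketch prints the commit-tail message exactly once, from whichever thread resolves the final dependency; that asynchrony is why the blocking path in the patch must additionally call i915_sw_fence_wait() before running intel_atomic_commit_tail() synchronously.
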
@@ -14510,12 +14510,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 static void intel_atomic_commit_work(struct work_struct *work)
 {
-	struct drm_atomic_state *state = container_of(work,
-						      struct drm_atomic_state,
-						      commit_work);
+	struct drm_atomic_state *state =
+		container_of(work, struct drm_atomic_state, commit_work);
 
 	intel_atomic_commit_tail(state);
 }
 
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+			  enum i915_sw_fence_notify notify)
+{
+	struct intel_atomic_state *state =
+		container_of(fence, struct intel_atomic_state, commit_ready);
+
+	switch (notify) {
+	case FENCE_COMPLETE:
+		if (state->base.commit_work.func)
+			queue_work(system_unbound_wq, &state->base.commit_work);
+		break;
+
+	case FENCE_FREE:
+		drm_atomic_state_put(&state->base);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
 	struct drm_plane_state *old_plane_state;
@@ -14561,11 +14582,14 @@ static int intel_atomic_commit(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+	drm_atomic_state_get(state);
+	i915_sw_fence_init(&intel_state->commit_ready,
+			   intel_atomic_commit_ready);
 
 	ret = intel_atomic_prepare_commit(dev, state);
 	if (ret) {
 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+		i915_sw_fence_commit(&intel_state->commit_ready);
 		return ret;
 	}
@@ -14576,10 +14600,14 @@ static int intel_atomic_commit(struct drm_device *dev,
 	intel_atomic_track_fbs(state);
 
 	drm_atomic_state_get(state);
-	if (nonblock)
-		queue_work(system_unbound_wq, &state->commit_work);
-	else
+	INIT_WORK(&state->commit_work,
+		  nonblock ? intel_atomic_commit_work : NULL);
+
+	i915_sw_fence_commit(&intel_state->commit_ready);
+	if (!nonblock) {
+		i915_sw_fence_wait(&intel_state->commit_ready);
 		intel_atomic_commit_tail(state);
+	}
 
 	return 0;
 }
@@ -14691,20 +14719,22 @@ int
 intel_prepare_plane_fb(struct drm_plane *plane,
 		       struct drm_plane_state *new_state)
 {
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(new_state->state);
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_framebuffer *fb = new_state->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
-	long lret;
-	int ret = 0;
+	int ret;
 
 	if (!obj && !old_obj)
 		return 0;
 
 	if (old_obj) {
 		struct drm_crtc_state *crtc_state =
-			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+			drm_atomic_get_existing_crtc_state(new_state->state,
+							   plane->state->crtc);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14717,31 +14747,36 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		 * This should only fail upon a hung GPU, in which case we
 		 * can safely continue.
 		 */
-		if (needs_modeset(crtc_state))
-			ret = i915_gem_object_wait(old_obj,
-						   I915_WAIT_INTERRUPTIBLE |
-						   I915_WAIT_LOCKED,
-						   MAX_SCHEDULE_TIMEOUT,
-						   NULL);
-		if (ret) {
-			/* GPU hangs should have been swallowed by the wait */
-			WARN_ON(ret == -EIO);
-			return ret;
+		if (needs_modeset(crtc_state)) {
+			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+							      old_obj->resv, NULL,
+							      false, 0,
+							      GFP_KERNEL);
+			if (ret < 0)
+				return ret;
 		}
 	}
 
+	if (new_state->fence) { /* explicit fencing */
+		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+						    new_state->fence,
+						    I915_FENCE_TIMEOUT,
+						    GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
+
 	if (!obj)
 		return 0;
 
-	/* For framebuffer backed by dmabuf, wait for fence */
-	lret = i915_gem_object_wait(obj,
-				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-				    MAX_SCHEDULE_TIMEOUT,
-				    NULL);
-	if (lret == -ERESTARTSYS)
-		return lret;
-
-	WARN(lret < 0, "waiting returns %li\n", lret);
+	if (!new_state->fence) { /* implicit fencing */
+		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+						      obj->resv, NULL,
+						      false, I915_FENCE_TIMEOUT,
+						      GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
 	    INTEL_INFO(dev)->cursor_needs_physical) {
...
@@ -365,6 +365,8 @@ struct intel_atomic_state {
 	/* Gen9+ only */
 	struct skl_wm_values wm_results;
+
+	struct i915_sw_fence commit_ready;
 };
 
 struct intel_plane_state {
...