Commit 8cac6f6c authored by Chris Wilson

drm/i915: Refactor blocking waits

Tidy up the for loops that handle waiting for read/write vs read-only
access.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-13-git-send-email-chris@chris-wilson.co.uk
parent d72d908b
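
Note: the refactored paths below all funnel into one loop driven by for_each_active(),
which walks the set bits of an engine bitmask. Its definition is not part of these
hunks; a minimal sketch of what such an iterator could look like, assuming it yields
the index of each set bit and consumes the mask as it goes:

/* Hedged sketch only; the real macro lives elsewhere in this series.
 * Yields the index of the lowest set bit in @mask into @idx, then
 * clears that bit, until the mask is empty.
 */
#define for_each_active(mask, idx) \
	for (; (mask) ? ((idx) = __builtin_ctzl(mask)), 1 : 0; \
	     (mask) &= (mask) - 1)

An iterator of this shape destroys the mask it walks, which is consistent with the
callers below loading obj->active into a local active_mask instead of iterating
obj->active directly.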
@@ -1339,6 +1339,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+			       struct drm_i915_gem_request *req)
+{
+	int idx = req->engine->id;
+
+	if (i915_gem_active_peek(&obj->last_read[idx],
+				 &obj->base.dev->struct_mutex) == req)
+		i915_gem_object_retire__read(obj, idx);
+	else if (i915_gem_active_peek(&obj->last_write,
+				      &obj->base.dev->struct_mutex) == req)
+		i915_gem_object_retire__write(obj);
+
+	if (!i915_reset_in_progress(&req->i915->gpu_error))
+		i915_gem_request_retire_upto(req);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1349,39 +1366,34 @@ int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
-	struct drm_i915_gem_request *request;
 	struct reservation_object *resv;
-	int ret, i;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx, ret;
 
-	if (readonly) {
-		request = i915_gem_active_peek(&obj->last_write,
-					       &obj->base.dev->struct_mutex);
-		if (request) {
-			ret = i915_wait_request(request);
-			if (ret)
-				return ret;
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-			i = request->engine->id;
-			if (i915_gem_active_peek(&obj->last_read[i],
-						 &obj->base.dev->struct_mutex) == request)
-				i915_gem_object_retire__read(obj, i);
-			else
-				i915_gem_object_retire__write(obj);
-		}
+	if (!readonly) {
+		active = obj->last_read;
+		active_mask = obj->active;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			request = i915_gem_active_peek(&obj->last_read[i],
-						       &obj->base.dev->struct_mutex);
-			if (!request)
-				continue;
+		active_mask = 1;
+		active = &obj->last_write;
+	}
 
-			ret = i915_wait_request(request);
-			if (ret)
-				return ret;
+	for_each_active(active_mask, idx) {
+		struct drm_i915_gem_request *request;
 
-			i915_gem_object_retire__read(obj, i);
-		}
-		GEM_BUG_ON(obj->active);
+		request = i915_gem_active_peek(&active[idx],
+					       &obj->base.dev->struct_mutex);
+		if (!request)
+			continue;
+
+		ret = i915_wait_request(request);
+		if (ret)
+			return ret;
+
+		i915_gem_object_retire_request(obj, request);
 	}
 
 	resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -1397,23 +1409,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-			       struct drm_i915_gem_request *req)
-{
-	int idx = req->engine->id;
-
-	if (i915_gem_active_peek(&obj->last_read[idx],
-				 &obj->base.dev->struct_mutex) == req)
-		i915_gem_object_retire__read(obj, idx);
-	else if (i915_gem_active_peek(&obj->last_write,
-				      &obj->base.dev->struct_mutex) == req)
-		i915_gem_object_retire__write(obj);
-
-	if (!i915_reset_in_progress(&req->i915->gpu_error))
-		i915_gem_request_retire_upto(req);
-}
-
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
  * as the object state may change during this call.
  */
@@ -1425,34 +1420,31 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	struct i915_gem_active *active;
+	unsigned long active_mask;
 	int ret, i, n = 0;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!dev_priv->mm.interruptible);
 
-	if (!obj->active)
+	active_mask = obj->active;
+	if (!active_mask)
 		return 0;
 
-	if (readonly) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_active_get(&obj->last_write,
-					  &obj->base.dev->struct_mutex);
-		if (req == NULL)
-			return 0;
-
-		requests[n++] = req;
+	if (!readonly) {
+		active = obj->last_read;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_active_get(&obj->last_read[i],
-						  &obj->base.dev->struct_mutex);
-			if (req == NULL)
-				continue;
+		active_mask = 1;
+		active = &obj->last_write;
+	}
 
-			requests[n++] = req;
-		}
+	for_each_active(active_mask, i) {
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_active_get(&active[i],
+					  &obj->base.dev->struct_mutex);
+		if (req)
+			requests[n++] = req;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2934,33 +2926,33 @@ int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_request *to)
 {
-	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	int ret, i, n;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
 
-	if (!obj->active)
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	active_mask = obj->active;
+	if (!active_mask)
 		return 0;
 
-	n = 0;
-	if (readonly) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_active_peek(&obj->last_write,
-					   &obj->base.dev->struct_mutex);
-		if (req)
-			requests[n++] = req;
+	if (obj->base.pending_write_domain) {
+		active = obj->last_read;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_active_peek(&obj->last_read[i],
-						   &obj->base.dev->struct_mutex);
-			if (req)
-				requests[n++] = req;
-		}
+		active_mask = 1;
+		active = &obj->last_write;
 	}
-	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, requests[i]);
+
+	for_each_active(active_mask, idx) {
+		struct drm_i915_gem_request *request;
+		int ret;
+
+		request = i915_gem_active_peek(&active[idx],
+					       &obj->base.dev->struct_mutex);
+		if (!request)
+			continue;
+
+		ret = __i915_gem_object_sync(obj, to, request);
 		if (ret)
 			return ret;
 	}
...
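
A note on the design choice above: in the read-only case each function sets
active_mask = 1 and points active at &obj->last_write, so the shared loop visits
only index 0, and active[0] reads the lone write slot as if it were a one-element
array. That lets i915_gem_object_wait_rendering(), the nonblocking variant, and
i915_gem_object_sync() share a single loop body instead of duplicating the readonly
branch. A self-contained demo of that aliasing trick, with purely illustrative
names (none of this is driver code):

#include <stdio.h>

struct slot { const char *name; };

/* Visit active[idx] for every bit set in mask, lowest bit first. */
static void visit(struct slot *active, unsigned long mask)
{
	while (mask) {
		int idx = __builtin_ctzl(mask);	/* index of lowest set bit */

		printf("waiting on %s\n", active[idx].name);
		mask &= mask - 1;		/* clear that bit */
	}
}

int main(void)
{
	struct slot last_read[3] = { {"rcs"}, {"bcs"}, {"vcs"} };
	struct slot last_write = { "last_write" };

	/* Read/write access: walk every engine flagged in the mask. */
	visit(last_read, 0x5);		/* engines 0 and 2 active */

	/* Read-only access: mask = 1 makes the loop visit only index 0,
	 * which aliases the single write slot.
	 */
	visit(&last_write, 1);
	return 0;
}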