Commit 8cac6f6c authored by Chris Wilson

drm/i915: Refactor blocking waits

Tidy up the for loops that handle waiting for read/write vs read-only
access.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-13-git-send-email-chris@chris-wilson.co.uk
parent d72d908b
......@@ -1339,6 +1339,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return ret;
}
/* Retire the object's tracking of @req once the request has completed.
 *
 * If @req is still the most recent request seen on its engine's
 * last_read slot, drop the read tracking for that engine; otherwise,
 * if it is still the tracked last_write request, drop the write
 * tracking. Finally, unless a GPU reset is in progress, retire all
 * requests on the engine up to and including @req.
 *
 * NOTE(review): i915_gem_active_peek() requires struct_mutex, which is
 * passed here — presumably the caller already holds it; confirm against
 * callers.
 */
static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req)
{
/* Engine index used to select the per-engine last_read slot. */
int idx = req->engine->id;
if (i915_gem_active_peek(&obj->last_read[idx],
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__read(obj, idx);
else if (i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__write(obj);
/* Skip retiring the request chain while a reset is pending, as the
 * requests may be completed out of order by the reset handler.
 */
if (!i915_reset_in_progress(&req->i915->gpu_error))
i915_gem_request_retire_upto(req);
}
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
......@@ -1349,28 +1366,25 @@ int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct drm_i915_gem_request *request;
struct reservation_object *resv;
int ret, i;
struct i915_gem_active *active;
unsigned long active_mask;
int idx, ret;
if (readonly) {
request = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (request) {
ret = i915_wait_request(request);
if (ret)
return ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
i = request->engine->id;
if (i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex) == request)
i915_gem_object_retire__read(obj, i);
else
i915_gem_object_retire__write(obj);
}
if (!readonly) {
active = obj->last_read;
active_mask = obj->active;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
request = i915_gem_active_peek(&obj->last_read[i],
active_mask = 1;
active = &obj->last_write;
}
for_each_active(active_mask, idx) {
struct drm_i915_gem_request *request;
request = i915_gem_active_peek(&active[idx],
&obj->base.dev->struct_mutex);
if (!request)
continue;
......@@ -1379,9 +1393,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
if (ret)
return ret;
i915_gem_object_retire__read(obj, i);
}
GEM_BUG_ON(obj->active);
i915_gem_object_retire_request(obj, request);
}
resv = i915_gem_object_get_dmabuf_resv(obj);
......@@ -1397,23 +1409,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
return 0;
}
/* Retire the object's tracking of @req once the request has completed.
 *
 * Checks whether @req is still the request tracked in the object's
 * per-engine last_read slot (retiring the read tracking if so) or in
 * its last_write slot (retiring the write tracking). Then, provided no
 * GPU reset is in progress, retires every request up to and including
 * @req on its engine.
 *
 * NOTE(review): the struct_mutex passed to i915_gem_active_peek() is
 * presumably held by the caller — verify at the call sites.
 */
static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req)
{
/* Index of the engine that executed @req. */
int idx = req->engine->id;
if (i915_gem_active_peek(&obj->last_read[idx],
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__read(obj, idx);
else if (i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__write(obj);
/* Don't advance the retirement queue mid-reset; the reset path
 * handles outstanding requests itself.
 */
if (!i915_reset_in_progress(&req->i915->gpu_error))
i915_gem_request_retire_upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
* as the object state may change during this call.
*/
......@@ -1425,35 +1420,32 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
struct i915_gem_active *active;
unsigned long active_mask;
int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
if (!obj->active)
return 0;
if (readonly) {
struct drm_i915_gem_request *req;
req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req == NULL)
active_mask = obj->active;
if (!active_mask)
return 0;
requests[n++] = req;
if (!readonly) {
active = obj->last_read;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
active_mask = 1;
active = &obj->last_write;
}
for_each_active(active_mask, i) {
struct drm_i915_gem_request *req;
req = i915_gem_active_get(&obj->last_read[i],
req = i915_gem_active_get(&active[i],
&obj->base.dev->struct_mutex);
if (req == NULL)
continue;
if (req)
requests[n++] = req;
}
}
mutex_unlock(&dev->struct_mutex);
ret = 0;
......@@ -2934,33 +2926,33 @@ int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *to)
{
const bool readonly = obj->base.pending_write_domain == 0;
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int ret, i, n;
struct i915_gem_active *active;
unsigned long active_mask;
int idx;
if (!obj->active)
return 0;
lockdep_assert_held(&obj->base.dev->struct_mutex);
n = 0;
if (readonly) {
struct drm_i915_gem_request *req;
active_mask = obj->active;
if (!active_mask)
return 0;
req = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
if (obj->base.pending_write_domain) {
active = obj->last_read;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
active_mask = 1;
active = &obj->last_write;
}
req = i915_gem_active_peek(&obj->last_read[i],
for_each_active(active_mask, idx) {
struct drm_i915_gem_request *request;
int ret;
request = i915_gem_active_peek(&active[idx],
&obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
}
}
for (i = 0; i < n; i++) {
ret = __i915_gem_object_sync(obj, to, requests[i]);
if (!request)
continue;
ret = __i915_gem_object_sync(obj, to, request);
if (ret)
return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment