Commit efdf7c06 authored by Chris Wilson

drm/i915: Rename request->list to link for consistency

We use "list" to denote the list and "link" to denote an element on that
list. Rename request->list to match this idiom.
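
As an illustrative sketch of the idiom (both structures trimmed to just the
members involved here; count_requests() is only an example helper, not driver
code):

	#include <linux/list.h>

	struct intel_engine_cs {
		struct list_head request_list;	/* the list: outstanding requests on this engine */
	};

	struct drm_i915_gem_request {
		struct list_head link;		/* this request's entry on engine->request_list */
	};

	static int count_requests(struct intel_engine_cs *engine)
	{
		struct drm_i915_gem_request *request;
		int count = 0;

		/* iteration names the element's "link" member, not the list itself */
		list_for_each_entry(request, &engine->request_list, link)
			count++;
		return count;
	}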
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-14-git-send-email-chris@chris-wilson.co.uk
parent 8cac6f6c
@@ -746,13 +746,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, list)
+ list_for_each_entry(req, &engine->request_list, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, list) {
+ list_for_each_entry(req, &engine->request_list, link) {
struct task_struct *task;
rcu_read_lock();
@@ -2475,7 +2475,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, link) {
if (i915_gem_request_completed(request))
continue;
@@ -2497,7 +2497,7 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
i915_set_reset_status(request->ctx, ring_hung);
- list_for_each_entry_continue(request, &engine->request_list, list)
+ list_for_each_entry_continue(request, &engine->request_list, link)
i915_set_reset_status(request->ctx, false);
}
@@ -2546,7 +2546,7 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
request = list_last_entry(&engine->request_list,
struct drm_i915_gem_request,
- list);
+ link);
i915_gem_request_retire_upto(request);
}
@@ -2609,7 +2609,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
request = list_first_entry(&engine->request_list,
struct drm_i915_gem_request,
- list);
+ link);
if (!i915_gem_request_completed(request))
break;
@@ -2629,7 +2629,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
engine_list[engine->id]);
if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
- &obj->base.dev->struct_mutex)->list))
+ &obj->base.dev->struct_mutex)->link))
break;
i915_gem_object_retire__read(obj, engine->id);
@@ -160,7 +160,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
trace_i915_gem_request_retire(request);
- list_del_init(&request->list);
+ list_del_init(&request->link);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -191,12 +191,12 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (list_empty(&req->list))
+ if (list_empty(&req->link))
return;
do {
tmp = list_first_entry(&engine->request_list,
- typeof(*tmp), list);
+ typeof(*tmp), link);
i915_gem_request_retire(tmp);
} while (tmp != req);
@@ -317,7 +317,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->request_list,
- typeof(*req), list);
+ typeof(*req), link);
if (req && i915_gem_request_completed(req))
i915_gem_request_retire(req);
@@ -450,7 +450,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->emitted_jiffies = jiffies;
request->previous_seqno = engine->last_submitted_seqno;
smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
- list_add_tail(&request->list, &engine->request_list);
+ list_add_tail(&request->link, &engine->request_list);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -570,7 +570,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
might_sleep();
- if (list_empty(&req->list))
+ if (list_empty(&req->link))
return 0;
if (i915_gem_request_completed(req))
@@ -105,8 +105,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- /** global list entry for this request */
- struct list_head list;
+ /** engine->request_list entry for this request */
+ struct list_head link;
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
@@ -1168,7 +1168,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_gem_record_active_context(engine, error, ee);
count = 0;
- list_for_each_entry(request, &engine->request_list, list)
+ list_for_each_entry(request, &engine->request_list, link)
count++;
ee->num_requests = count;
@@ -1180,7 +1180,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
}
count = 0;
- list_for_each_entry(request, &engine->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, link) {
struct drm_i915_error_request *erq;
if (count >= ee->num_requests) {
@@ -2235,7 +2235,7 @@ int intel_engine_idle(struct intel_engine_cs *engine)
req = list_entry(engine->request_list.prev,
struct drm_i915_gem_request,
- list);
+ link);
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
@@ -2284,7 +2284,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
*/
GEM_BUG_ON(!req->reserved_space);
- list_for_each_entry(target, &engine->request_list, list) {
+ list_for_each_entry(target, &engine->request_list, link) {
unsigned space;
/*
@@ -2302,7 +2302,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
break;
}
- if (WARN_ON(&target->list == &engine->request_list))
+ if (WARN_ON(&target->link == &engine->request_list))
return -ENOSPC;
return i915_wait_request(target);