Commit 56cea323 authored by Joonas Lahtinen

drm/i915: Unify global_list into global_link

$ sed -i -r 's/\bglobal_list\b/global_link/g' *.c *.h

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1478081764-8058-1-git-send-email-joonas.lahtinen@linux.intel.com
parent 3ac168a7
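
Note: the rename can be done purely textually because the member name only ever appears as the "member" argument of the kernel's intrusive-list helpers (container_of(), list_for_each_entry(), list_move_tail() and friends), which resolve it at compile time via offsetof(); any stale use of the old name would simply fail to build. The following is a minimal, self-contained userspace sketch of that pattern, not the actual drm/i915 or <linux/list.h> code: struct obj, the cut-down list_for_each_entry() and the simplified list_add_tail() below are illustrative stand-ins.

/*
 * Userspace sketch of the intrusive-list pattern behind the rename.
 * Simplified re-implementations for illustration only.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

/* Recover the containing object from a pointer to its embedded list_head */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Cut-down copy of the kernel's list_for_each_entry() */
#define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

/* Hypothetical stand-in for struct drm_i915_gem_object */
struct obj {
        unsigned long size;
        struct list_head global_link;   /* previously named global_list */
};

/* Insert entry at the tail of the list (immediately before head) */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

int main(void)
{
        struct list_head bound_list = { &bound_list, &bound_list };
        struct obj a = { .size = 4096 }, b = { .size = 8192 };
        struct obj *pos;

        list_add_tail(&a.global_link, &bound_list);
        list_add_tail(&b.global_link, &bound_list);

        /* The member name is passed as the third argument, as in the diff */
        list_for_each_entry(pos, &bound_list, global_link)
                printf("object of %lu bytes\n", pos->size);

        return 0;
}

Built with gcc (typeof is a GNU extension), the sketch walks the list through the renamed member and prints both object sizes, which is all the list macros ever need from the member name.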
@@ -217,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
-list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -227,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
-list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -390,7 +390,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
-list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
@@ -407,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
-list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
@@ -493,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
-list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
@@ -2235,7 +2235,7 @@ struct drm_i915_gem_object {
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
-struct list_head global_list;
+struct list_head global_link;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -1446,7 +1446,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
i915 = to_i915(obj->base.dev);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-list_move_tail(&obj->global_list, list);
+list_move_tail(&obj->global_link, list);
}
/**
@@ -2967,7 +2967,7 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (--obj->bind_count == 0)
-list_move_tail(&obj->global_list,
+list_move_tail(&obj->global_link,
&to_i915(obj->base.dev)->mm.unbound_list);
/* And finally now the object is completely decoupled from this vma,
@@ -3164,7 +3164,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
-list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
obj->bind_count++;
@@ -4125,7 +4125,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
mutex_init(&obj->mm.lock);
-INIT_LIST_HEAD(&obj->global_list);
+INIT_LIST_HEAD(&obj->global_link);
INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
@@ -4272,7 +4272,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
-list_del(&obj->global_list);
+list_del(&obj->global_link);
}
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
@@ -4847,7 +4847,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
for (p = phases; *p; p++) {
-list_for_each_entry(obj, *p, global_list) {
+list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -3296,7 +3296,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
-&dev_priv->mm.bound_list, global_list) {
+&dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3376,7 +3376,7 @@ i915_vma_retire(struct i915_gem_active *active,
* (unless we are forced to ofc!)
*/
if (obj->bind_count)
-list_move_tail(&obj->global_list, &rq->i915->mm.bound_list);
+list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
obj->mm.dirty = true; /* be paranoid */
@@ -199,10 +199,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
-global_list))) {
-list_move_tail(&obj->global_list, &still_in_list);
+global_link))) {
+list_move_tail(&obj->global_link, &still_in_list);
if (!obj->mm.pages) {
-list_del_init(&obj->global_list);
+list_del_init(&obj->global_link);
continue;
}
@@ -228,7 +228,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
I915_MM_SHRINKER);
if (!obj->mm.pages) {
__i915_gem_object_invalidate(obj);
-list_del_init(&obj->global_list);
+list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
@@ -293,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv);
count = 0;
-list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
-list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -398,7 +398,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
-list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (!obj->mm.pages)
continue;
@@ -407,7 +407,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
else
unbound += obj->base.size >> PAGE_SHIFT;
}
-list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!obj->mm.pages)
continue;
@@ -723,7 +723,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
-list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
return obj;