Commit 548625ee authored by Chris Wilson

drm/i915: Improve lockdep tracking for obj->mm.lock

The shrinker may appear to recurse into obj->mm.lock as the shrinker may
be called from a direct reclaim path whilst handling get_pages. We
filter out recursing on the same obj->mm.lock by inspecting
obj->mm.pages, but we do want to take the lock on a second object in
order to reap their pages. lockdep spots the recursion on the same
lockclass and needs annotation to avoid a false positive. To keep the
two paths distinct, create an enum to indicate which subclass of
obj->mm.lock we are using. This removes the false positive and avoids
masking real bugs.
Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161101121134.27504-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent db6c2b41
...@@ -3287,7 +3287,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) ...@@ -3287,7 +3287,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
} }
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
I915_MM_NORMAL = 0,
I915_MM_SHRINKER
};
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type { enum i915_map_type {
......
...@@ -491,7 +491,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, ...@@ -491,7 +491,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
__i915_gem_object_put_pages(obj); __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
if (obj->mm.pages) if (obj->mm.pages)
return -EBUSY; return -EBUSY;
...@@ -2181,7 +2181,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) ...@@ -2181,7 +2181,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
radix_tree_delete(&obj->mm.get_page.radix, iter.index); radix_tree_delete(&obj->mm.get_page.radix, iter.index);
} }
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
{ {
struct sg_table *pages; struct sg_table *pages;
...@@ -2193,7 +2194,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) ...@@ -2193,7 +2194,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return; return;
/* May be called by shrinker from within get_pages() (on another bo) */ /* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, SINGLE_DEPTH_NESTING); mutex_lock_nested(&obj->mm.lock, subclass);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
goto unlock; goto unlock;
...@@ -4283,7 +4284,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, ...@@ -4283,7 +4284,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
atomic_set(&obj->mm.pages_pin_count, 0); atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj); __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
GEM_BUG_ON(obj->mm.pages); GEM_BUG_ON(obj->mm.pages);
if (obj->base.import_attach) if (obj->base.import_attach)
......
...@@ -111,7 +111,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -111,7 +111,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{ {
if (i915_gem_object_unbind(obj) == 0) if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj); __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
return !READ_ONCE(obj->mm.pages); return !READ_ONCE(obj->mm.pages);
} }
...@@ -225,7 +225,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -225,7 +225,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (unsafe_drop_pages(obj)) { if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */ /* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock, mutex_lock_nested(&obj->mm.lock,
SINGLE_DEPTH_NESTING); I915_MM_SHRINKER);
if (!obj->mm.pages) { if (!obj->mm.pages) {
__i915_gem_object_invalidate(obj); __i915_gem_object_invalidate(obj);
list_del_init(&obj->global_list); list_del_init(&obj->global_list);
......
...@@ -75,7 +75,7 @@ static void cancel_userptr(struct work_struct *work) ...@@ -75,7 +75,7 @@ static void cancel_userptr(struct work_struct *work)
/* We are inside a kthread context and can't be interrupted */ /* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0) if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj); __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
WARN_ONCE(obj->mm.pages, WARN_ONCE(obj->mm.pages,
"Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n", "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
obj->bind_count, obj->bind_count,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment