Commit 168cf367 authored by Chris Wilson

drm/i915/shrinker: Refactor common uninterruptible locking

Both the oom and vmap notifier callbacks have a loop to acquire the
struct_mutex and set the device as uninterruptible, within a certain
time. Refactor the common code into a pair of functions.
Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1459848145-24042-1-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent e87666b5
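
For quick reference, the caller pattern that results from the refactor, condensed from the oom notifier in the diff below (the vmap notifier follows the same shape; freed-page accounting omitted here), is roughly:

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;

	/* Retry struct_mutex for up to 5000 ms; give up on a fatal signal
	 * or on timeout. On success, mm.interruptible is saved and cleared.
	 */
	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	i915_gem_shrink_all(dev_priv);

	/* Restore mm.interruptible and drop struct_mutex if it was taken. */
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
	return NOTIFY_DONE;
}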
@@ -289,35 +289,56 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	return freed;
 }
 
+struct shrinker_lock_uninterruptible {
+	bool was_interruptible;
+	bool unlock;
+};
+
+static bool
+i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
+				       struct shrinker_lock_uninterruptible *slu,
+				       int timeout_ms)
+{
+	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+
+	while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+		schedule_timeout_killable(1);
+		if (fatal_signal_pending(current))
+			return false;
+		if (--timeout == 0) {
+			pr_err("Unable to lock GPU to purge memory.\n");
+			return false;
+		}
+	}
+
+	slu->was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+	return true;
+}
+
+static void
+i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
+					 struct shrinker_lock_uninterruptible *slu)
+{
+	dev_priv->mm.interruptible = slu->was_interruptible;
+	if (slu->unlock)
+		mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
-	struct drm_device *dev = dev_priv->dev;
+	struct shrinker_lock_uninterruptible slu;
 	struct drm_i915_gem_object *obj;
-	unsigned long timeout = msecs_to_jiffies(5000) + 1;
 	unsigned long pinned, bound, unbound, freed_pages;
-	bool was_interruptible;
-	bool unlock;
 
-	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
-		schedule_timeout_killable(1);
-		if (fatal_signal_pending(current))
-			return NOTIFY_DONE;
-	}
-	if (timeout == 0) {
-		pr_err("Unable to purge GPU memory due lock contention.\n");
+	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 		return NOTIFY_DONE;
-	}
-
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
-	dev_priv->mm.interruptible = was_interruptible;
 
 	/* Because we may be allocating inside our own driver, we cannot
 	 * assert that there are no objects with pinned pages that are not
 	 * being pointed to by hardware.
@@ -342,8 +363,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 		bound += obj->base.size;
 	}
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
@@ -362,30 +382,15 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
-	struct drm_device *dev = dev_priv->dev;
-	unsigned long timeout = msecs_to_jiffies(5000) + 1;
+	struct shrinker_lock_uninterruptible slu;
 	unsigned long freed_pages;
-	bool was_interruptible;
-	bool unlock;
 
-	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
-		schedule_timeout_killable(1);
-		if (fatal_signal_pending(current))
-			return NOTIFY_DONE;
-	}
-	if (timeout == 0) {
-		pr_err("Unable to purge GPU vmaps due to lock contention.\n");
+	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 		return NOTIFY_DONE;
-	}
-
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
-	dev_priv->mm.interruptible = was_interruptible;
-
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;