Commit 6afb0750 authored by Rob Clark

drm/msm: Reorganize msm_gem_shrinker_scan()

So we don't have to duplicate the boilerplate for eviction.

This also lets us re-use the main scan loop for vmap shrinker.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20210405174532.1441497-3-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 90643a24
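
In brief: both shrinker paths used to open-code the same walk-the-list loop. The patch factors that into a single scan() helper that takes a per-object callback (purge() for memory pressure, vmap_shrink() for vmap pressure). Below is a hypothetical, simplified userspace sketch of that callback pattern; the struct layout, list handling, and names here are stand-ins, not the kernel's msm_gem API, and the real scan() additionally trylocks each object and requeues survivors under priv->mm_lock, as the diff shows.

/*
 * Sketch of the pattern this commit introduces: one generic scan loop,
 * parameterized by a per-object callback, so the purge and vunmap
 * paths stop duplicating boilerplate.  All names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct obj {
	struct obj *next;
	unsigned long pages;
	bool purgeable;		/* stand-in for is_purgeable() */
	bool vmapped;		/* stand-in for is_vunmapable() */
};

/* Generic loop: walk the list until enough pages are freed, letting
 * the callback decide what "freeing" means for each object. */
static unsigned long
scan(struct obj *list, unsigned long nr_to_scan,
     bool (*shrink)(struct obj *obj))
{
	unsigned long freed = 0;
	struct obj *obj;

	for (obj = list; obj && freed < nr_to_scan; obj = obj->next) {
		if (shrink(obj))
			freed += obj->pages;
	}

	return freed;
}

/* Callback for the memory-pressure path (models purge()). */
static bool
do_purge(struct obj *obj)
{
	if (!obj->purgeable)
		return false;
	obj->purgeable = false;	/* pretend the backing pages were dropped */
	return true;
}

/* Callback for the vmap-pressure path (models vmap_shrink()). */
static bool
do_vunmap(struct obj *obj)
{
	if (!obj->vmapped)
		return false;
	obj->vmapped = false;	/* pretend the kernel mapping was torn down */
	return true;
}

int main(void)
{
	struct obj b = { NULL, 4, false, true };
	struct obj a = { &b, 8, true, false };

	printf("purged: %lu pages\n", scan(&a, 16, do_purge));
	printf("vunmapped: %lu pages\n", scan(&a, 16, do_vunmap));
	return 0;
}
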
@@ -17,21 +17,35 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	return priv->shrinkable_count;
 }
 
+static bool
+purge(struct msm_gem_object *msm_obj)
+{
+	if (!is_purgeable(msm_obj))
+		return false;
+
+	/*
+	 * This will move the obj out of still_in_list to
+	 * the purged list
+	 */
+	msm_gem_purge(&msm_obj->base);
+
+	return true;
+}
+
 static unsigned long
-msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
+		bool (*shrink)(struct msm_gem_object *msm_obj))
 {
-	struct msm_drm_private *priv =
-		container_of(shrinker, struct msm_drm_private, shrinker);
+	unsigned freed = 0;
 	struct list_head still_in_list;
-	unsigned long freed = 0;
 
 	INIT_LIST_HEAD(&still_in_list);
 
 	mutex_lock(&priv->mm_lock);
 
-	while (freed < sc->nr_to_scan) {
+	while (freed < nr_to_scan) {
 		struct msm_gem_object *msm_obj = list_first_entry_or_null(
-				&priv->inactive_dontneed, typeof(*msm_obj), mm_list);
+				list, typeof(*msm_obj), mm_list);
 
 		if (!msm_obj)
 			break;
@@ -62,14 +76,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		if (!msm_gem_trylock(&msm_obj->base))
 			goto tail;
 
-		if (is_purgeable(msm_obj)) {
-			/*
-			 * This will move the obj out of still_in_list to
-			 * the purged list
-			 */
-			msm_gem_purge(&msm_obj->base);
+		if (shrink(msm_obj))
 			freed += msm_obj->base.size >> PAGE_SHIFT;
-		}
+
 		msm_gem_unlock(&msm_obj->base);
 
 tail:
@@ -77,16 +86,25 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		mutex_lock(&priv->mm_lock);
 	}
 
-	list_splice_tail(&still_in_list, &priv->inactive_dontneed);
+	list_splice_tail(&still_in_list, list);
 	mutex_unlock(&priv->mm_lock);
 
-	if (freed > 0) {
+	return freed;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct msm_drm_private *priv =
+		container_of(shrinker, struct msm_drm_private, shrinker);
+	unsigned long freed;
+
+	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
+
+	if (freed > 0)
 		trace_msm_gem_purge(freed << PAGE_SHIFT);
-	} else {
-		return SHRINK_STOP;
-	}
 
-	return freed;
+	return (freed > 0) ? freed : SHRINK_STOP;
 }
 
 /* since we don't know any better, lets bail after a few
@@ -95,29 +113,15 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
  */
 static const int vmap_shrink_limit = 15;
 
-static unsigned
-vmap_shrink(struct list_head *mm_list)
+static bool
+vmap_shrink(struct msm_gem_object *msm_obj)
 {
-	struct msm_gem_object *msm_obj;
-	unsigned unmapped = 0;
+	if (!is_vunmapable(msm_obj))
+		return false;
 
-	list_for_each_entry(msm_obj, mm_list, mm_list) {
-		/* Use trylock, because we cannot block on a obj that
-		 * might be trying to acquire mm_lock
-		 */
-		if (!msm_gem_trylock(&msm_obj->base))
-			continue;
-		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base);
-			unmapped++;
-		}
-		msm_gem_unlock(&msm_obj->base);
+	msm_gem_vunmap(&msm_obj->base);
 
-		if (++unmapped >= vmap_shrink_limit)
-			break;
-	}
-
-	return unmapped;
+	return true;
 }
 
 static int
@@ -133,17 +137,11 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	};
 	unsigned idx, unmapped = 0;
 
-	mutex_lock(&priv->mm_lock);
-
-	for (idx = 0; mm_lists[idx]; idx++) {
-		unmapped += vmap_shrink(mm_lists[idx]);
-
-		if (unmapped >= vmap_shrink_limit)
-			break;
-	}
-
-	mutex_unlock(&priv->mm_lock);
+	for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
+		unmapped += scan(priv, vmap_shrink_limit - unmapped,
+				mm_lists[idx], vmap_shrink);
+	}
 
 	*(unsigned long *)ptr += unmapped;
 
 	if (unmapped > 0)