Commit cf11c1f8 authored by Rob Clark

drm/msm: Drop struct_mutex in shrinker path

Now that the inactive_list is protected by mm_lock, and everything
else on a per-obj basis is protected by obj->resv, we no longer
depend on struct_mutex.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent f92f026a
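
For context, a minimal sketch of what the count path looks like with this patch applied, reconstructed from the diff below: only priv->mm_lock is taken, and struct_mutex no longer appears anywhere in the shrinker. The inactive_list walk is an assumption based on the surrounding driver code, not part of this diff.

/*
 * Sketch (not part of the patch): msm_gem_shrinker_count() after this
 * change. The list iteration is assumed from the surrounding driver
 * code; everything here is serialized by priv->mm_lock alone.
 */
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	mutex_unlock(&priv->mm_lock);

	return count;
}
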
drivers/gpu/drm/msm/msm_gem.c
@@ -687,7 +687,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 	WARN_ON(!is_purgeable(msm_obj));
 	WARN_ON(obj->import_attach);
drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -8,48 +8,13 @@
 #include "msm_gem.h"
 #include "msm_gpu_trace.h"
 
-static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
-	/* NOTE: we are *closer* to being able to get rid of
-	 * mutex_trylock_recursive().. the msm_gem code itself does
-	 * not need struct_mutex, although codepaths that can trigger
-	 * shrinker are still called in code-paths that hold the
-	 * struct_mutex.
-	 *
-	 * Also, msm_obj->madv is protected by struct_mutex.
-	 *
-	 * The next step is probably split out a seperate lock for
-	 * protecting inactive_list, so that shrinker does not need
-	 * struct_mutex.
-	 */
-	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
-	case MUTEX_TRYLOCK_FAILED:
-		return false;
-
-	case MUTEX_TRYLOCK_SUCCESS:
-		*unlock = true;
-		return true;
-
-	case MUTEX_TRYLOCK_RECURSIVE:
-		*unlock = false;
-		return true;
-	}
-
-	BUG();
-}
-
 static unsigned long
 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	struct drm_device *dev = priv->dev;
 	struct msm_gem_object *msm_obj;
 	unsigned long count = 0;
-	bool unlock;
-
-	if (!msm_gem_shrinker_lock(dev, &unlock))
-		return 0;
 
 	mutex_lock(&priv->mm_lock);
@@ -63,9 +28,6 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 
 	mutex_unlock(&priv->mm_lock);
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
 	return count;
 }
@@ -74,13 +36,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	struct drm_device *dev = priv->dev;
 	struct msm_gem_object *msm_obj;
 	unsigned long freed = 0;
-	bool unlock;
-
-	if (!msm_gem_shrinker_lock(dev, &unlock))
-		return SHRINK_STOP;
 
 	mutex_lock(&priv->mm_lock);
@@ -98,9 +55,6 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
 	mutex_unlock(&priv->mm_lock);
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
 	if (freed > 0)
 		trace_msm_gem_purge(freed << PAGE_SHIFT);
@@ -112,13 +66,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct msm_drm_private *priv =
 		container_of(nb, struct msm_drm_private, vmap_notifier);
-	struct drm_device *dev = priv->dev;
 	struct msm_gem_object *msm_obj;
 	unsigned unmapped = 0;
-	bool unlock;
-
-	if (!msm_gem_shrinker_lock(dev, &unlock))
-		return NOTIFY_DONE;
 
 	mutex_lock(&priv->mm_lock);
@@ -141,9 +90,6 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 
 	mutex_unlock(&priv->mm_lock);
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
 	*(unsigned long *)ptr += unmapped;
 
 	if (unmapped > 0)
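
For completeness, a hedged sketch of how these two hooks are typically wired up at init time. register_shrinker() and register_vmap_purge_notifier() are stock kernel APIs of this era; the function name msm_gem_shrinker_init() and the priv field names are assumptions based on the callbacks appearing in the diff above.

/*
 * Sketch (assumed, not part of this patch): registering the shrinker
 * and the vmap purge notifier whose callbacks the diff above modifies.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
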