Commit e1e9db2c authored by Rob Clark

drm/msm: wire up vmap shrinker

Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 18f23049
@@ -153,6 +153,7 @@ struct msm_drm_private {
 		struct drm_mm mm;
 	} vram;
 
+	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
 	struct msm_vblank_ctrl vblank_ctrl;
@@ -206,6 +207,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
...
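The declaration hunks above add a notifier_block to the driver-private struct and export msm_gem_vunmap(), which the later hunks use to hook into the vmap purge notifier. For readers unfamiliar with that mechanism, here is a minimal standalone sketch of the registration/callback pattern the rest of the patch instantiates; the foo_* names are hypothetical, while register_vmap_purge_notifier()/unregister_vmap_purge_notifier() and the notifier_block callback signature are the real kernel API used below.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>

/* Hypothetical driver-private struct; msm keeps the equivalent
 * notifier_block in struct msm_drm_private. */
struct foo_priv {
	struct notifier_block vmap_notifier;
	/* ... driver state, e.g. a list of objects with kernel mappings ... */
};

/* Invoked when the kernel is about to purge vmap address space.  The
 * callback drops whatever kernel mappings it can spare and reports the
 * count back through *ptr (an unsigned long), as msm_gem_shrinker_vmap()
 * does below with its 'unmapped' counter. */
static int foo_vmap_notify(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct foo_priv *priv = container_of(nb, struct foo_priv, vmap_notifier);
	unsigned long unmapped = 0;

	/* ... walk priv's objects, vunmap() the idle ones, count them ... */

	*(unsigned long *)ptr += unmapped;
	return NOTIFY_DONE;
}

static void foo_init(struct foo_priv *priv)
{
	priv->vmap_notifier.notifier_call = foo_vmap_notify;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

static void foo_cleanup(struct foo_priv *priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
}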
@@ -421,6 +421,7 @@ void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 		if (msm_obj->vaddr == NULL)
 			return ERR_PTR(-ENOMEM);
 	}
+	msm_obj->vmap_count++;
 	return msm_obj->vaddr;
 }
 
@@ -435,13 +436,17 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 
 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-	/* no-op for now */
+	WARN_ON(msm_obj->vmap_count < 1);
+	msm_obj->vmap_count--;
 }
 
 void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
-	/* no-op for now */
+	mutex_lock(&obj->dev->struct_mutex);
+	msm_gem_put_vaddr_locked(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 
 /* Update madvise status, returns true if not purged, else
@@ -470,8 +475,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
 
 	put_iova(obj);
 
-	vunmap(msm_obj->vaddr);
-	msm_obj->vaddr = NULL;
+	msm_gem_vunmap(obj);
 
 	put_pages(obj);
 
@@ -491,6 +495,17 @@ void msm_gem_purge(struct drm_gem_object *obj)
 			0, (loff_t)-1);
 }
 
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+		return;
+
+	vunmap(msm_obj->vaddr);
+	msm_obj->vaddr = NULL;
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -694,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		vunmap(msm_obj->vaddr);
+		msm_gem_vunmap(obj);
 		put_pages(obj);
 	}
...
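With these changes, msm_gem_get_vaddr()/msm_gem_put_vaddr() become a real reference count on the kernel mapping: while vmap_count is non-zero, is_vunmapable() is false and neither msm_gem_vunmap() nor the purge notifier will tear the mapping down. A minimal sketch of the expected pairing in driver code (msm_fill_bo() is a hypothetical caller; only the get/put helpers are from the patch):

/* Hypothetical helper: copy CPU data into a GEM BO through its vmap.
 * Between get and put, vmap_count > 0, so the vmap purge notifier
 * leaves this object's mapping alone.  Assumes the msm_drv.h/msm_gem.h
 * declarations are in scope. */
static int msm_fill_bo(struct drm_gem_object *obj, const void *src, size_t len)
{
	void *vaddr = msm_gem_get_vaddr(obj);	/* takes struct_mutex, bumps vmap_count */

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);

	msm_gem_put_vaddr(obj);	/* drops vmap_count; the mapping may now be reclaimed */
	return 0;
}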
@@ -34,6 +34,11 @@ struct msm_gem_object {
 	 */
 	uint8_t madv;
 
+	/**
+	 * count of active vmap'ing
+	 */
+	uint8_t vmap_count;
+
 	/* And object is either:
 	 *  inactive - on priv->inactive_list
 	 *  active   - on one one of the gpu's active_list..  well, at
@@ -83,6 +88,11 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 		!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
 }
 
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
+
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
...
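is_vunmapable() is the gate shared by msm_gem_vunmap() and the purge notifier: an object qualifies only when it still has a kernel mapping (vaddr is set) but no outstanding get_vaddr() references (vmap_count == 0). Since vmap_count is a uint8_t, get/put calls need to stay balanced, which the WARN_ON(msm_obj->vmap_count < 1) in msm_gem_put_vaddr_locked() helps catch.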
@@ -100,6 +100,42 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	return freed;
 }
 
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+	struct msm_drm_private *priv =
+		container_of(nb, struct msm_drm_private, vmap_notifier);
+	struct drm_device *dev = priv->dev;
+	struct msm_gem_object *msm_obj;
+	unsigned unmapped = 0;
+	bool unlock;
+
+	if (!msm_gem_shrinker_lock(dev, &unlock))
+		return NOTIFY_DONE;
+
+	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+		if (is_vunmapable(msm_obj)) {
+			msm_gem_vunmap(&msm_obj->base);
+			/* since we don't know any better, lets bail after a few
+			 * and if necessary the shrinker will be invoked again.
+			 * Seems better than unmapping *everything*
+			 */
+			if (++unmapped >= 15)
+				break;
+		}
+	}
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+
+	*(unsigned long *)ptr += unmapped;
+
+	if (unmapped > 0)
+		pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+	return NOTIFY_DONE;
+}
+
 /**
  * msm_gem_shrinker_init - Initialize msm shrinker
  * @dev_priv: msm device
@@ -113,6 +149,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
 	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
 	priv->shrinker.seeks = DEFAULT_SEEKS;
 	WARN_ON(register_shrinker(&priv->shrinker));
+
+	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
 }
 
 /**
@@ -124,5 +163,6 @@ void msm_gem_shrinker_init(struct drm_device *dev)
 void msm_gem_shrinker_cleanup(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
 	unregister_shrinker(&priv->shrinker);
 }
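Two small points worth noting from this file: msm_gem_shrinker_cleanup() unregisters the vmap purge notifier before the shrinker, the reverse of the registration order in msm_gem_shrinker_init(); and, per the in-line comment, the notifier callback bails after vunmapping 15 objects rather than sweeping the whole inactive list, relying on being invoked again if more vmap address space is still needed.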