Commit 64fcbde7 authored by Rob Clark

drm/msm: Track potentially evictable objects

Objects that are potential candidates for swapping out are (1) willneed (ie. if
they are purgeable/MADV_DONTNEED we can just free the pages without them
having to land in swap), (2) not on an active list, (3) not dma-buf
imported or exported, and (4) not vmap'd.  This repurposes the purged
list for objects that do not have backing pages (either because they
have not been pinned for the first time yet or, in a later patch, because
they have been unpinned/evicted).
Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20210405174532.1441497-7-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent f48f3563
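
For illustration, the conditions listed in the commit message reduce to a predicate along the following lines. This is a minimal standalone userspace sketch, not the driver code: struct bo_model and its fields are invented stand-ins for struct msm_gem_object, and the real check is the is_unevictable() helper added to msm_gem.h in the diff below (which also folds in the new pin_count).

/*
 * Standalone sketch of the evictability test described above.
 * All names are illustrative; see is_unevictable() in the patch
 * for the in-kernel version.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bo_model {
	int madv;          /* 0 = WILLNEED, 1 = DONTNEED, 2 = PURGED */
	int active_count;  /* >0 while on a gpu active list */
	int pin_count;     /* >0 while pinned for GPU access (added by this patch) */
	void *vaddr;       /* non-NULL while vmap'd */
	bool imported;     /* dma-buf imported or exported */
};

/* (1) willneed, (2) inactive, (3) no dma-buf attachment, (4) not vmap'd */
static bool is_evictable(const struct bo_model *bo)
{
	return bo->madv == 0 &&
	       bo->active_count == 0 &&
	       bo->pin_count == 0 &&
	       bo->vaddr == NULL &&
	       !bo->imported;
}

int main(void)
{
	struct bo_model bo = { .madv = 0 };
	printf("%d\n", is_evictable(&bo));  /* 1: idle willneed bo */
	bo.pin_count = 1;
	printf("%d\n", is_evictable(&bo));  /* 0: pinned bos stay resident */
	return 0;
}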
drivers/gpu/drm/msm/msm_drv.c
@@ -452,7 +452,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	INIT_LIST_HEAD(&priv->inactive_willneed);
 	INIT_LIST_HEAD(&priv->inactive_dontneed);
-	INIT_LIST_HEAD(&priv->inactive_purged);
+	INIT_LIST_HEAD(&priv->inactive_unpinned);
 	mutex_init(&priv->mm_lock);
 
 	/* Teach lockdep about lock ordering wrt. shrinker: */
drivers/gpu/drm/msm/msm_drv.h
@@ -182,11 +182,15 @@ struct msm_drm_private {
 	struct mutex obj_lock;
 
 	/**
-	 * Lists of inactive GEM objects.  Every bo is either in one of the
+	 * LRUs of inactive GEM objects.  Every bo is either in one of the
 	 * inactive lists (depending on whether or not it is shrinkable) or
 	 * gpu->active_list (for the gpu it is active on[1]), or transiently
 	 * on a temporary list as the shrinker is running.
 	 *
+	 * Note that inactive_willneed also contains pinned and vmap'd bos,
+	 * but the number of pinned-but-not-active objects is small (scanout
+	 * buffers, ringbuffer, etc).
+	 *
 	 * These lists are protected by mm_lock (which should be acquired
 	 * before per GEM object lock).  One should *not* hold mm_lock in
 	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
@@ -194,10 +198,11 @@ struct msm_drm_private {
 	 * [1] if someone ever added support for the old 2d cores, there could be
 	 * more than one gpu object
 	 */
-	struct list_head inactive_willneed;  /* inactive + !shrinkable */
+	struct list_head inactive_willneed;  /* inactive + potentially unpin/evictable */
 	struct list_head inactive_dontneed;  /* inactive + shrinkable */
-	struct list_head inactive_purged;    /* inactive + purged */
+	struct list_head inactive_unpinned;  /* inactive + purged or unpinned */
 	long shrinkable_count;               /* write access under mm_lock */
+	long evictable_count;                /* write access under mm_lock */
 	struct mutex mm_lock;
 
 	struct workqueue_struct *wq;
drivers/gpu/drm/msm/msm_gem.c
@@ -130,6 +130,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 			sync_for_device(msm_obj);
+
+		GEM_WARN_ON(msm_obj->active_count);
+		update_inactive(msm_obj);
 	}
 
 	return msm_obj->pages;
@@ -428,7 +431,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	struct page **pages;
-	int prot = IOMMU_READ;
+	int ret, prot = IOMMU_READ;
 
 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 		prot |= IOMMU_WRITE;
@@ -449,8 +452,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	return msm_gem_map_vma(aspace, vma, prot,
+	ret = msm_gem_map_vma(aspace, vma, prot,
 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
+
+	if (!ret)
+		msm_obj->pin_count++;
+
+	return ret;
 }
 
 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
@@ -542,14 +550,21 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
 	GEM_WARN_ON(!msm_gem_is_locked(obj));
 
 	vma = lookup_vma(obj, aspace);
 
-	if (!GEM_WARN_ON(!vma))
+	if (!GEM_WARN_ON(!vma)) {
 		msm_gem_unmap_vma(aspace, vma);
+
+		msm_obj->pin_count--;
+		GEM_WARN_ON(msm_obj->pin_count < 0);
+
+		update_inactive(msm_obj);
+	}
 }
 
 /*
@@ -800,9 +815,12 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 	GEM_WARN_ON(!msm_gem_is_locked(obj));
 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	GEM_WARN_ON(msm_obj->dontneed);
+	GEM_WARN_ON(!msm_obj->sgt);
 
 	if (msm_obj->active_count++ == 0) {
 		mutex_lock(&priv->mm_lock);
+		if (msm_obj->evictable)
+			mark_unevictable(msm_obj);
 		list_del(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 		mutex_unlock(&priv->mm_lock);
@@ -825,21 +843,28 @@ static void update_inactive(struct msm_gem_object *msm_obj)
 {
 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
 
+	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+
+	if (msm_obj->active_count != 0)
+		return;
+
 	mutex_lock(&priv->mm_lock);
-	GEM_WARN_ON(msm_obj->active_count != 0);
 
 	if (msm_obj->dontneed)
 		mark_unpurgeable(msm_obj);
+	if (msm_obj->evictable)
+		mark_unevictable(msm_obj);
 
 	list_del(&msm_obj->mm_list);
-	if (msm_obj->madv == MSM_MADV_WILLNEED) {
+	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+		mark_evictable(msm_obj);
 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
 		mark_purgeable(msm_obj);
 	} else {
-		GEM_WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
+		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 	}
 
 	mutex_unlock(&priv->mm_lock);
@@ -1201,8 +1226,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	}
 
 	mutex_lock(&priv->mm_lock);
-	/* Initially obj is idle, obj->madv == WILLNEED: */
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 	mutex_unlock(&priv->mm_lock);
 
 	mutex_lock(&priv->obj_lock);
@@ -1276,7 +1300,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	msm_gem_unlock(obj);
 
 	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 	mutex_unlock(&priv->mm_lock);
 
 	mutex_lock(&priv->obj_lock);
drivers/gpu/drm/msm/msm_gem.h
@@ -60,6 +60,11 @@ struct msm_gem_object {
 	 */
 	bool dontneed : 1;
 
+	/**
+	 * Is object evictable (ie. counted in priv->evictable_count)?
+	 */
+	bool evictable : 1;
+
 	/**
 	 * count of active vmap'ing
 	 */
@@ -74,7 +79,7 @@ struct msm_gem_object {
 	/**
 	 * An object is either:
 	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
-	 *     (depending on purgability status)
+	 *     (depending on purgeability status)
 	 *  active   - on one one of the gpu's active_list..  well, at
 	 *     least for now we don't have (I don't think) hw sync between
 	 *     2d and 3d one devices which have both, meaning we need to
@@ -103,6 +108,7 @@ struct msm_gem_object {
 	char name[32]; /* Identifier to print for the debugfs files */
 
 	int active_count;
+	int pin_count;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -263,7 +269,46 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
 	msm_obj->dontneed = false;
 }
 
+static inline bool is_unevictable(struct msm_gem_object *msm_obj)
+{
+	return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
+}
+
+static inline void mark_evictable(struct msm_gem_object *msm_obj)
+{
+	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+	if (is_unevictable(msm_obj))
+		return;
+
+	if (WARN_ON(msm_obj->evictable))
+		return;
+
+	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
+	msm_obj->evictable = true;
+}
+
+static inline void mark_unevictable(struct msm_gem_object *msm_obj)
+{
+	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+	if (is_unevictable(msm_obj))
+		return;
+
+	if (WARN_ON(!msm_obj->evictable))
+		return;
+
+	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
+	WARN_ON(priv->evictable_count < 0);
+
+	msm_obj->evictable = false;
+}
+
 void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_evict(struct drm_gem_object *obj);
 void msm_gem_vunmap(struct drm_gem_object *obj);
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,