Commit 025d2723 authored by Rob Clark

drm/msm/gem: Evict active GEM objects when necessary

If we are under enough memory pressure, we should stall waiting for
active buffers to become idle in order to evict.

v2: Check for __GFP_ATOMIC before blocking
Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496135/
Link: https://lore.kernel.org/r/20220802155152.1727594-14-robdclark@gmail.com
parent dd2f0d78
......@@ -24,6 +24,13 @@ static bool can_swap(void)
return enable_eviction && get_nr_swap_pages() > 0;
}
static bool can_block(struct shrink_control *sc)
{
if (sc->gfp_mask & __GFP_ATOMIC)
return false;
return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
......@@ -65,26 +72,65 @@ evict(struct drm_gem_object *obj)
return true;
}
/*
 * Stall (up to a 1000-jiffy timeout) until all read/write fences on
 * the object have signaled.  Returns true if the object became idle,
 * false on timeout or error.
 */
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	long remain;

	remain = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(true),
				       false, 1000);

	return remain > 0;
}
/*
 * Aggressive variant of purge(): first wait for the object to go idle,
 * then purge it.  Fails (returns false) if the object never idles.
 */
static bool
active_purge(struct drm_gem_object *obj)
{
	return wait_for_idle(obj) && purge(obj);
}
/*
 * Aggressive variant of evict(): first wait for the object to go idle,
 * then evict it.  Fails (returns false) if the object never idles.
 */
static bool
active_evict(struct drm_gem_object *obj)
{
	return wait_for_idle(obj) && evict(obj);
}
/*
 * Shrinker scan callback.  The diff capture had fused the pre-patch body
 * (separate purge/evict passes with a duplicate 'freed' declaration and a
 * 3-argument trace call) into the post-patch staged version; this is the
 * coherent post-patch function.
 *
 * Walks a table of reclaim stages in order of increasing aggressiveness/
 * cost, stopping once sc->nr_to_scan pages have been reclaimed:
 *   1. purge dontneed (cheap, always)
 *   2. evict willneed (only if swap is available)
 *   3. purge dontneed after stalling on active objects (only if we may block)
 *   4. evict willneed after stalling (only if swap available and may block)
 *
 * Returns the number of pages freed, or SHRINK_STOP if nothing could be
 * reclaimed.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}
......
......@@ -116,22 +116,26 @@ TRACE_EVENT(msm_gmu_freq_change,
/*
 * Tracepoint emitted by msm_gem_shrinker_scan().  The diff capture had
 * both the old 3-argument and new 5-argument TP_PROTO/TP_ARGS/TP_printk
 * present simultaneously; this is the coherent post-patch definition,
 * which adds per-stage counts for the blocking (active) purge/evict paths.
 */
TRACE_EVENT(msm_gem_shrink,
	TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
		 u32 active_purged, u32 active_evicted),
	TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
	TP_STRUCT__entry(
		__field(u32, nr_to_scan)
		__field(u32, purged)
		__field(u32, evicted)
		__field(u32, active_purged)
		__field(u32, active_evicted)
	),
	TP_fast_assign(
		__entry->nr_to_scan = nr_to_scan;
		__entry->purged = purged;
		__entry->evicted = evicted;
		__entry->active_purged = active_purged;
		__entry->active_evicted = active_evicted;
	),
	TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
		  __entry->nr_to_scan, __entry->purged, __entry->evicted,
		  __entry->active_purged, __entry->active_evicted)
);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment