Commit 1bfcbad1 authored by Roger He, committed by Alex Deucher

drm/ttm: roundup the shrink request to prevent skip huge pool

e.g. if the shrink request is less than 512 pages, the logic will skip the huge pool
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 444f8ef3
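
For context, a minimal userspace sketch of the arithmetic this patch fixes. The order value of 9 (512 pages per huge-page entry) and the local reimplementation of the kernel's roundup() macro are illustrative assumptions, not taken from the patch. With a shrink request below 512, the old truncating shift asks the huge pool to free 0 entries, while the rounded-up request frees at least one:

#include <stdio.h>

/* Local stand-in for the kernel's roundup() macro, for illustration only. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned int order = 9;             /* hypothetical huge pool: 1 << 9 = 512 pages */
	unsigned int page_nr = 1u << order;
	unsigned int nr_free = 100;         /* shrink request smaller than one huge page */

	/* Old logic: the truncating shift yields 0, so the huge pool
	 * is asked to free nothing and is effectively skipped. */
	printf("old nr_free_pool = %u\n", nr_free >> order);                     /* prints 0 */

	/* Patched logic: round the request up to a whole huge page first,
	 * so the pool frees at least one entry. */
	printf("new nr_free_pool = %u\n", roundup(nr_free, page_nr) >> order);   /* prints 1 */

	return 0;
}

Because the rounded-up request can free more pages than originally asked for, the patch also breaks out of the pool loop as soon as freed reaches sc->nr_to_scan.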
@@ -442,17 +442,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
+		unsigned page_nr;
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+		page_nr = (1 << pool->order);
 		/* OK to use static buffer since global mutex is held. */
-		nr_free_pool = (nr_free >> pool->order);
+		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
+		if (nr_free_pool == 0)
+			continue;
 		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
-		freed += ((nr_free_pool - shrink_pages) << pool->order);
+		freed += (nr_free_pool - shrink_pages) << pool->order;
+		if (freed >= sc->nr_to_scan)
+			break;
 	}
 	mutex_unlock(&lock);
 	return freed;