Commit 23959281 authored by Rokudo Yan's avatar Rokudo Yan Committed by Linus Torvalds

zsmalloc: account the number of compacted pages correctly

There exist multiple paths that may perform zram compaction concurrently:
1. auto-compaction triggered during memory reclaim
2. userspace utils writing the zram<id>/compaction node

So, multiple threads may call zs_shrinker_scan/zs_compact concurrently.
But pages_compacted is a per zsmalloc pool variable, and modification
of the variable is not serialized (though it happens under class->lock).
There are two issues here:
1. pages_compacted may not equal the total number of pages
freed (due to concurrent additions).
2. zs_shrinker_scan may not return the correct number of pages
freed (by the current shrinker invocation).

The fix is simple:
1. account the number of pages freed in zs_compact locally.
2. use an atomic variable pages_compacted to accumulate the total number.

Link: https://lkml.kernel.org/r/20210202122235.26885-1-wu-yan@tcl.com
Fixes: 860c707d ("zsmalloc: account the number of compacted pages")
Signed-off-by: default avatarRokudo Yan <wu-yan@tcl.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f0231305
...@@ -1081,7 +1081,7 @@ static ssize_t mm_stat_show(struct device *dev, ...@@ -1081,7 +1081,7 @@ static ssize_t mm_stat_show(struct device *dev,
zram->limit_pages << PAGE_SHIFT, zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT, max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages), (u64)atomic64_read(&zram->stats.same_pages),
pool_stats.pages_compacted, atomic_long_read(&pool_stats.pages_compacted),
(u64)atomic64_read(&zram->stats.huge_pages), (u64)atomic64_read(&zram->stats.huge_pages),
(u64)atomic64_read(&zram->stats.huge_pages_since)); (u64)atomic64_read(&zram->stats.huge_pages_since));
up_read(&zram->init_lock); up_read(&zram->init_lock);
......
...@@ -35,7 +35,7 @@ enum zs_mapmode { ...@@ -35,7 +35,7 @@ enum zs_mapmode {
struct zs_pool_stats { struct zs_pool_stats {
/* How many pages were migrated (freed) */ /* How many pages were migrated (freed) */
unsigned long pages_compacted; atomic_long_t pages_compacted;
}; };
struct zs_pool; struct zs_pool;
......
...@@ -2212,11 +2212,13 @@ static unsigned long zs_can_compact(struct size_class *class) ...@@ -2212,11 +2212,13 @@ static unsigned long zs_can_compact(struct size_class *class)
return obj_wasted * class->pages_per_zspage; return obj_wasted * class->pages_per_zspage;
} }
static void __zs_compact(struct zs_pool *pool, struct size_class *class) static unsigned long __zs_compact(struct zs_pool *pool,
struct size_class *class)
{ {
struct zs_compact_control cc; struct zs_compact_control cc;
struct zspage *src_zspage; struct zspage *src_zspage;
struct zspage *dst_zspage = NULL; struct zspage *dst_zspage = NULL;
unsigned long pages_freed = 0;
spin_lock(&class->lock); spin_lock(&class->lock);
while ((src_zspage = isolate_zspage(class, true))) { while ((src_zspage = isolate_zspage(class, true))) {
...@@ -2246,7 +2248,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) ...@@ -2246,7 +2248,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
putback_zspage(class, dst_zspage); putback_zspage(class, dst_zspage);
if (putback_zspage(class, src_zspage) == ZS_EMPTY) { if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
free_zspage(pool, class, src_zspage); free_zspage(pool, class, src_zspage);
pool->stats.pages_compacted += class->pages_per_zspage; pages_freed += class->pages_per_zspage;
} }
spin_unlock(&class->lock); spin_unlock(&class->lock);
cond_resched(); cond_resched();
...@@ -2257,12 +2259,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) ...@@ -2257,12 +2259,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
putback_zspage(class, src_zspage); putback_zspage(class, src_zspage);
spin_unlock(&class->lock); spin_unlock(&class->lock);
return pages_freed;
} }
unsigned long zs_compact(struct zs_pool *pool) unsigned long zs_compact(struct zs_pool *pool)
{ {
int i; int i;
struct size_class *class; struct size_class *class;
unsigned long pages_freed = 0;
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
class = pool->size_class[i]; class = pool->size_class[i];
...@@ -2270,10 +2275,11 @@ unsigned long zs_compact(struct zs_pool *pool) ...@@ -2270,10 +2275,11 @@ unsigned long zs_compact(struct zs_pool *pool)
continue; continue;
if (class->index != i) if (class->index != i)
continue; continue;
__zs_compact(pool, class); pages_freed += __zs_compact(pool, class);
} }
atomic_long_add(pages_freed, &pool->stats.pages_compacted);
return pool->stats.pages_compacted; return pages_freed;
} }
EXPORT_SYMBOL_GPL(zs_compact); EXPORT_SYMBOL_GPL(zs_compact);
...@@ -2290,13 +2296,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker, ...@@ -2290,13 +2296,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
struct zs_pool *pool = container_of(shrinker, struct zs_pool, struct zs_pool *pool = container_of(shrinker, struct zs_pool,
shrinker); shrinker);
pages_freed = pool->stats.pages_compacted;
/* /*
* Compact classes and calculate compaction delta. * Compact classes and calculate compaction delta.
* Can run concurrently with a manually triggered * Can run concurrently with a manually triggered
* (by user) compaction. * (by user) compaction.
*/ */
pages_freed = zs_compact(pool) - pages_freed; pages_freed = zs_compact(pool);
return pages_freed ? pages_freed : SHRINK_STOP; return pages_freed ? pages_freed : SHRINK_STOP;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment