Commit 6cbf16b3 authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: use class->pages_per_zspage

There is no need to recalculate pages_per_zspage at runtime.  Just use
class->pages_per_zspage to avoid unnecessary runtime overhead.
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ad9d5e17
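
As the commit message notes, pages_per_zspage depends only on the class size, so it can be computed once when the size class is set up and simply read back afterwards. Below is a minimal, self-contained C sketch of that caching pattern, illustration only: the names size_class_sketch and compute_pages_per_zspage and the placeholder page-count math are invented for the sketch and are not the zsmalloc definitions.

#include <stdio.h>

/*
 * Illustration only: simplified stand-ins for the zsmalloc structures.
 * The names and the page-count math here are invented for this sketch,
 * not kernel definitions.
 */
struct size_class_sketch {
	int size;		/* object size served by this class */
	int pages_per_zspage;	/* cached once at class setup */
};

/* Placeholder for the "how many pages per zspage" calculation. */
static int compute_pages_per_zspage(int size)
{
	return (size > 2048) ? 4 : 1;	/* pretend math */
}

/* Before the patch: the value was recomputed on every use. */
static int pages_freed_old(const struct size_class_sketch *class)
{
	return compute_pages_per_zspage(class->size);
}

/* After the patch: just read the value cached at setup time. */
static int pages_freed_new(const struct size_class_sketch *class)
{
	return class->pages_per_zspage;
}

int main(void)
{
	struct size_class_sketch class = {
		.size = 3072,
		/* computed once, analogous to pool-creation time */
		.pages_per_zspage = compute_pages_per_zspage(3072),
	};

	printf("old: %d pages, new: %d pages\n",
	       pages_freed_old(&class), pages_freed_new(&class));
	return 0;
}

In the actual change below, the cached field is class->pages_per_zspage, which the two hunks read instead of calling get_pages_per_zspage(class->size) again.
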
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1723,7 +1723,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
 		putback_zspage(pool, class, dst_page);
 		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-			pool->stats.pages_compacted +=
-					get_pages_per_zspage(class->size);
+			pool->stats.pages_compacted += class->pages_per_zspage;
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);