Commit 0dc63d48 authored by Sergey Senozhatsky, committed by Linus Torvalds

zsmalloc: cosmetic compaction code adjustments

Change zs_object_copy() argument order to be (DST, SRC) rather than
(SRC, DST).  copy/move functions usually have (to, from) arguments
order.

Rename alloc_target_page() to isolate_target_page().  This function
doesn't allocate anything, it isolates target page, pretty much like
isolate_source_page().

Tweak __zs_compact() comment.
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 04f05909
...@@ -1471,7 +1471,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) ...@@ -1471,7 +1471,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
} }
EXPORT_SYMBOL_GPL(zs_free); EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(unsigned long src, unsigned long dst, static void zs_object_copy(unsigned long dst, unsigned long src,
struct size_class *class) struct size_class *class)
{ {
struct page *s_page, *d_page; struct page *s_page, *d_page;
...@@ -1612,7 +1612,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, ...@@ -1612,7 +1612,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
used_obj = handle_to_obj(handle); used_obj = handle_to_obj(handle);
free_obj = obj_malloc(d_page, class, handle); free_obj = obj_malloc(d_page, class, handle);
zs_object_copy(used_obj, free_obj, class); zs_object_copy(free_obj, used_obj, class);
index++; index++;
record_obj(handle, free_obj); record_obj(handle, free_obj);
unpin_tag(handle); unpin_tag(handle);
...@@ -1628,7 +1628,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, ...@@ -1628,7 +1628,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
return ret; return ret;
} }
static struct page *alloc_target_page(struct size_class *class) static struct page *isolate_target_page(struct size_class *class)
{ {
int i; int i;
struct page *page; struct page *page;
...@@ -1718,11 +1718,11 @@ static unsigned long __zs_compact(struct zs_pool *pool, ...@@ -1718,11 +1718,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
cc.index = 0; cc.index = 0;
cc.s_page = src_page; cc.s_page = src_page;
while ((dst_page = alloc_target_page(class))) { while ((dst_page = isolate_target_page(class))) {
cc.d_page = dst_page; cc.d_page = dst_page;
/* /*
* If there is no more space in dst_page, try to * If there is no more space in dst_page, resched
* allocate another zspage. * and see if anyone had allocated another zspage.
*/ */
if (!migrate_zspage(pool, class, &cc)) if (!migrate_zspage(pool, class, &cc))
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment