Commit d468f1b8 authored by Chengming Zhou's avatar Chengming Zhou Committed by Andrew Morton

mm/zsmalloc: move record_obj() into obj_malloc()

We always record_obj() to make handle points to object after obj_malloc(),
so simplify the code by moving record_obj() into obj_malloc().  There
should be no functional change.

Link: https://lkml.kernel.org/r/20240627075959.611783-2-chengming.zhou@linux.dev
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 538148f9
...@@ -1306,7 +1306,6 @@ static unsigned long obj_malloc(struct zs_pool *pool, ...@@ -1306,7 +1306,6 @@ static unsigned long obj_malloc(struct zs_pool *pool,
void *vaddr; void *vaddr;
class = pool->size_class[zspage->class]; class = pool->size_class[zspage->class];
handle |= OBJ_ALLOCATED_TAG;
obj = get_freeobj(zspage); obj = get_freeobj(zspage);
offset = obj * class->size; offset = obj * class->size;
...@@ -1322,15 +1321,16 @@ static unsigned long obj_malloc(struct zs_pool *pool, ...@@ -1322,15 +1321,16 @@ static unsigned long obj_malloc(struct zs_pool *pool,
set_freeobj(zspage, link->next >> OBJ_TAG_BITS); set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
if (likely(!ZsHugePage(zspage))) if (likely(!ZsHugePage(zspage)))
/* record handle in the header of allocated chunk */ /* record handle in the header of allocated chunk */
link->handle = handle; link->handle = handle | OBJ_ALLOCATED_TAG;
else else
/* record handle to page->index */ /* record handle to page->index */
zspage->first_page->index = handle; zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, 1); mod_zspage_inuse(zspage, 1);
obj = location_to_obj(m_page, obj); obj = location_to_obj(m_page, obj);
record_obj(handle, obj);
return obj; return obj;
} }
...@@ -1348,7 +1348,7 @@ static unsigned long obj_malloc(struct zs_pool *pool, ...@@ -1348,7 +1348,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
*/ */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{ {
unsigned long handle, obj; unsigned long handle;
struct size_class *class; struct size_class *class;
int newfg; int newfg;
struct zspage *zspage; struct zspage *zspage;
...@@ -1371,10 +1371,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) ...@@ -1371,10 +1371,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
spin_lock(&class->lock); spin_lock(&class->lock);
zspage = find_get_zspage(class); zspage = find_get_zspage(class);
if (likely(zspage)) { if (likely(zspage)) {
obj = obj_malloc(pool, zspage, handle); obj_malloc(pool, zspage, handle);
/* Now move the zspage to another fullness group, if required */ /* Now move the zspage to another fullness group, if required */
fix_fullness_group(class, zspage); fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, ZS_OBJS_INUSE, 1); class_stat_inc(class, ZS_OBJS_INUSE, 1);
goto out; goto out;
...@@ -1389,10 +1388,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) ...@@ -1389,10 +1388,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
} }
spin_lock(&class->lock); spin_lock(&class->lock);
obj = obj_malloc(pool, zspage, handle); obj_malloc(pool, zspage, handle);
newfg = get_fullness_group(class, zspage); newfg = get_fullness_group(class, zspage);
insert_zspage(class, zspage, newfg); insert_zspage(class, zspage, newfg);
record_obj(handle, obj);
atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
class_stat_inc(class, ZS_OBJS_INUSE, 1); class_stat_inc(class, ZS_OBJS_INUSE, 1);
...@@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, ...@@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
free_obj = obj_malloc(pool, dst_zspage, handle); free_obj = obj_malloc(pool, dst_zspage, handle);
zs_object_copy(class, free_obj, used_obj); zs_object_copy(class, free_obj, used_obj);
obj_idx++; obj_idx++;
record_obj(handle, free_obj);
obj_free(class->size, used_obj); obj_free(class->size, used_obj);
/* Stop if there is no more space */ /* Stop if there is no more space */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment