Commit 70f75067 authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: avoid returning values by reference

Returning values by reference is bad practice.  Instead, just use the
function return value.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Suggested-by: Christoph Lameter <cl@linux.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b03a017b
mm/slab.c
@@ -460,9 +460,10 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 /*
  * Calculate the number of objects and left-over bytes for a given buffer size.
  */
-static void cache_estimate(unsigned long gfporder, size_t buffer_size,
-		unsigned long flags, size_t *left_over, unsigned int *num)
+static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
+		unsigned long flags, size_t *left_over)
 {
+	unsigned int num;
 	size_t slab_size = PAGE_SIZE << gfporder;
 
 	/*
@@ -483,13 +484,15 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	 * correct alignment when allocated.
 	 */
 	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
-		*num = slab_size / buffer_size;
+		num = slab_size / buffer_size;
 		*left_over = slab_size % buffer_size;
 	} else {
-		*num = slab_size / (buffer_size + sizeof(freelist_idx_t));
+		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 		*left_over = slab_size %
 			(buffer_size + sizeof(freelist_idx_t));
 	}
+
+	return num;
 }
 
 #if DEBUG
@@ -1893,7 +1896,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		unsigned int num;
 		size_t remainder;
 
-		cache_estimate(gfporder, size, flags, &remainder, &num);
+		num = cache_estimate(gfporder, size, flags, &remainder);
 		if (!num)
 			continue;
...
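For reference, a minimal standalone sketch of the same refactoring pattern, not part of the commit; estimate() and its parameters are hypothetical names used only for illustration. The value that previously came back through an output pointer becomes the function's return value, and the caller assigns it directly.

/*
 * Illustrative sketch only: the primary result (the count) becomes the
 * return value; the secondary result (the remainder) still comes back
 * through a pointer, since a C function can return only one value directly.
 */
#include <stddef.h>
#include <stdio.h>

/*
 * Before: both results returned by reference.
 *
 * static void estimate(size_t total, size_t item,
 *                      size_t *left_over, unsigned int *num);
 */

/* After: the count is the function's return value. */
static unsigned int estimate(size_t total, size_t item, size_t *left_over)
{
	*left_over = total % item;
	return (unsigned int)(total / item);
}

int main(void)
{
	size_t left_over;
	unsigned int num = estimate(4096, 96, &left_over);

	printf("num=%u left_over=%zu\n", num, left_over);
	return 0;
}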