Commit dc0a7f75 authored by Pengfei Li, committed by Linus Torvalds

mm, slab: remove unused kmalloc_size()

The size of kmalloc can be obtained from kmalloc_info[], so remove
kmalloc_size() that will not be used anymore.

Link: http://lkml.kernel.org/r/1569241648-26908-3-git-send-email-lpf.vector@gmail.com
Signed-off-by: Pengfei Li <lpf.vector@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cb5d9fb3
...@@ -561,26 +561,6 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) ...@@ -561,26 +561,6 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags); return __kmalloc(size, flags);
} }
/*
 * Return the object size backing the nth kmalloc cache, or 0 when no
 * kmalloc cache exists for that index.
 *
 * Indices above 2 map to the power-of-two caches (1 << n); indices 1
 * and 2 name the off-power-of-two 96- and 192-byte caches, which only
 * exist when the minimum kmalloc alignment leaves room for them.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	switch (n) {
	case 1:
		if (KMALLOC_MIN_SIZE <= 32)
			return 96;
		break;
	case 2:
		if (KMALLOC_MIN_SIZE <= 64)
			return 192;
		break;
	default:
		if (n > 2)
			return 1U << n;
		break;
	}
#endif
	return 0;
}
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{ {
#ifndef CONFIG_SLOB #ifndef CONFIG_SLOB
......
...@@ -1248,8 +1248,9 @@ void __init kmem_cache_init(void) ...@@ -1248,8 +1248,9 @@ void __init kmem_cache_init(void)
*/ */
kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache( kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL], kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS, kmalloc_info[INDEX_NODE].size,
0, kmalloc_size(INDEX_NODE)); ARCH_KMALLOC_FLAGS, 0,
kmalloc_info[INDEX_NODE].size);
slab_state = PARTIAL_NODE; slab_state = PARTIAL_NODE;
setup_kmalloc_cache_index_table(); setup_kmalloc_cache_index_table();
......
...@@ -1286,11 +1286,10 @@ void __init create_kmalloc_caches(slab_flags_t flags) ...@@ -1286,11 +1286,10 @@ void __init create_kmalloc_caches(slab_flags_t flags)
struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i]; struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
if (s) { if (s) {
unsigned int size = kmalloc_size(i);
kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache( kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
kmalloc_info[i].name[KMALLOC_DMA], kmalloc_info[i].name[KMALLOC_DMA],
size, SLAB_CACHE_DMA | flags, 0, 0); kmalloc_info[i].size,
SLAB_CACHE_DMA | flags, 0, 0);
} }
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment