Commit e3366016 authored by Christoph Lameter, committed by Pekka Enberg

slab: Use common kmalloc_index/kmalloc_size functions

Make slab use the common functions. We can get rid of a lot
of old ugly stuff as a result. Among them are the sizes
array, the weird include/linux/kmalloc_sizes file and
some pretty bad #include statements in slab_def.h.

The one thing that is different in slab is that the 32 byte
cache will also be created for arches that have page sizes
larger than 4K. There are numerous smaller allocations that
SLOB and SLUB can handle better because of their support for
smaller allocation sizes, so let's keep the 32 byte slab also
for arches with > 4K pages.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent ce6a5026
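For context on the two helpers this patch switches to: kmalloc_index() maps a requested allocation size to the index of the smallest general kmalloc cache that can hold it, and kmalloc_size() maps an index back to that cache's object size. The fragment below is only a simplified sketch of the power-of-two part of that mapping; the sketch_* names and the 8 byte minimum are made up for illustration (the real kmalloc_index() in include/linux/slab.h also covers the 96 and 192 byte caches, uses KMALLOC_MIN_SIZE, and is written so the compiler folds it away for constant sizes).

#include <stddef.h>

/* Illustration only: index of the smallest power-of-two cache that fits size.
 * Callers deal with size == 0 separately (kmalloc() returns ZERO_SIZE_PTR
 * before ever asking for an index, as the diff below shows). */
static inline unsigned int sketch_kmalloc_index(size_t size)
{
        unsigned int i = 3;     /* assume the smallest cache is 2^3 = 8 bytes */

        while (((size_t)1 << i) < size)
                i++;
        return i;
}

/* Illustration only: object size served by the cache at a given index. */
static inline size_t sketch_kmalloc_size(unsigned int index)
{
        return (size_t)1 << index;
}

Under this mapping the 32 byte cache discussed above corresponds to index 5 (2^5 == 32), and INDEX_AC/INDEX_L3 in the diff are simply the indices for sizeof(struct arraycache_init) and sizeof(struct kmem_list3).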
include/linux/kmalloc_sizes.h (file deleted)
-#if (PAGE_SIZE == 4096)
-        CACHE(32)
-#endif
-        CACHE(64)
-#if L1_CACHE_BYTES < 64
-        CACHE(96)
-#endif
-        CACHE(128)
-#if L1_CACHE_BYTES < 128
-        CACHE(192)
-#endif
-        CACHE(256)
-        CACHE(512)
-        CACHE(1024)
-        CACHE(2048)
-        CACHE(4096)
-        CACHE(8192)
-        CACHE(16384)
-        CACHE(32768)
-        CACHE(65536)
-        CACHE(131072)
-#if KMALLOC_MAX_SIZE >= 262144
-        CACHE(262144)
-#endif
-#if KMALLOC_MAX_SIZE >= 524288
-        CACHE(524288)
-#endif
-#if KMALLOC_MAX_SIZE >= 1048576
-        CACHE(1048576)
-#endif
-#if KMALLOC_MAX_SIZE >= 2097152
-        CACHE(2097152)
-#endif
-#if KMALLOC_MAX_SIZE >= 4194304
-        CACHE(4194304)
-#endif
-#if KMALLOC_MAX_SIZE >= 8388608
-        CACHE(8388608)
-#endif
-#if KMALLOC_MAX_SIZE >= 16777216
-        CACHE(16777216)
-#endif
-#if KMALLOC_MAX_SIZE >= 33554432
-        CACHE(33554432)
-#endif
include/linux/slab_def.h
@@ -11,8 +11,6 @@
  */
 #include <linux/init.h>
-#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
 /*
@@ -104,15 +102,8 @@ struct kmem_cache {
          */
 };
-/* Size description struct for general caches. */
-struct cache_sizes {
-        size_t                   cs_size;
-        struct kmem_cache       *cs_cachep;
-#ifdef CONFIG_ZONE_DMA
-        struct kmem_cache       *cs_dmacachep;
-#endif
-};
-extern struct cache_sizes malloc_sizes[];
+extern struct kmem_cache *kmalloc_caches[PAGE_SHIFT + MAX_ORDER];
+extern struct kmem_cache *kmalloc_dma_caches[PAGE_SHIFT + MAX_ORDER];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
@@ -133,26 +124,19 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
         void *ret;
         if (__builtin_constant_p(size)) {
-                int i = 0;
+                int i;
                 if (!size)
                         return ZERO_SIZE_PTR;
-#define CACHE(x) \
-                if (size <= x) \
-                        goto found; \
-                else \
-                        i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-                return NULL;
-found:
+                i = kmalloc_index(size);
 #ifdef CONFIG_ZONE_DMA
                 if (flags & GFP_DMA)
-                        cachep = malloc_sizes[i].cs_dmacachep;
+                        cachep = kmalloc_dma_caches[i];
                 else
 #endif
-                        cachep = malloc_sizes[i].cs_cachep;
+                        cachep = kmalloc_caches[i];
                 ret = kmem_cache_alloc_trace(cachep, flags, size);
@@ -186,26 +170,19 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
         struct kmem_cache *cachep;
         if (__builtin_constant_p(size)) {
-                int i = 0;
+                int i;
                 if (!size)
                         return ZERO_SIZE_PTR;
-#define CACHE(x) \
-                if (size <= x) \
-                        goto found; \
-                else \
-                        i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-                return NULL;
-found:
+                i = kmalloc_index(size);
 #ifdef CONFIG_ZONE_DMA
                 if (flags & GFP_DMA)
-                        cachep = malloc_sizes[i].cs_dmacachep;
+                        cachep = kmalloc_dma_caches[i];
                 else
 #endif
-                        cachep = malloc_sizes[i].cs_cachep;
+                        cachep = kmalloc_caches[i];
                 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
         }
mm/slab.c
@@ -318,34 +318,18 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
-/*
- * This function must be completely optimized away if a constant is passed to
- * it. Mostly the same as what is in linux/slab.h except it returns an index.
- */
-static __always_inline int index_of(const size_t size)
-{
-        extern void __bad_size(void);
-        if (__builtin_constant_p(size)) {
-                int i = 0;
-#define CACHE(x) \
-        if (size <=x) \
-                return i; \
-        else \
-                i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-                __bad_size();
-        } else
-                __bad_size();
-        return 0;
-}
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_caches);
+#ifdef CONFIG_ZONE_DMA
+struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_dma_caches);
+#endif
 static int slab_early_init = 1;
-#define INDEX_AC index_of(sizeof(struct arraycache_init))
-#define INDEX_L3 index_of(sizeof(struct kmem_list3))
+#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
+#define INDEX_L3 kmalloc_index(sizeof(struct kmem_list3))
 static void kmem_list3_init(struct kmem_list3 *parent)
 {
@@ -524,30 +508,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
         return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
-/*
- * These are the default caches for kmalloc. Custom caches can have other sizes.
- */
-struct cache_sizes malloc_sizes[] = {
-#define CACHE(x) { .cs_size = (x) },
-#include <linux/kmalloc_sizes.h>
-        CACHE(ULONG_MAX)
-#undef CACHE
-};
-EXPORT_SYMBOL(malloc_sizes);
-/* Must match cache_sizes above. Out of line to keep cache footprint low. */
-struct cache_names {
-        char *name;
-        char *name_dma;
-};
-static struct cache_names __initdata cache_names[] = {
-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
-#include <linux/kmalloc_sizes.h>
-        {NULL,}
-#undef CACHE
-};
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
@@ -625,19 +585,23 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 static void init_node_lock_keys(int q)
 {
-        struct cache_sizes *s = malloc_sizes;
+        int i;
         if (slab_state < UP)
                 return;
-        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
                 struct kmem_list3 *l3;
+                struct kmem_cache *cache = kmalloc_caches[i];
+                if (!cache)
+                        continue;
-                l3 = s->cs_cachep->nodelists[q];
-                if (!l3 || OFF_SLAB(s->cs_cachep))
+                l3 = cache->nodelists[q];
+                if (!l3 || OFF_SLAB(cache))
                         continue;
-                slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+                slab_set_lock_classes(cache, &on_slab_l3_key,
                                 &on_slab_alc_key, q);
         }
 }
@@ -705,20 +669,19 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 static inline struct kmem_cache *__find_general_cachep(size_t size,
                                                         gfp_t gfpflags)
 {
-        struct cache_sizes *csizep = malloc_sizes;
+        int i;
 #if DEBUG
         /* This happens if someone tries to call
          * kmem_cache_create(), or __kmalloc(), before
          * the generic caches are initialized.
          */
-        BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
+        BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
 #endif
         if (!size)
                 return ZERO_SIZE_PTR;
-        while (size > csizep->cs_size)
-                csizep++;
+        i = kmalloc_index(size);
         /*
          * Really subtle: The last entry with cs->cs_size==ULONG_MAX
@@ -727,9 +690,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
          */
 #ifdef CONFIG_ZONE_DMA
         if (unlikely(gfpflags & GFP_DMA))
-                return csizep->cs_dmacachep;
+                return kmalloc_dma_caches[i];
 #endif
-        return csizep->cs_cachep;
+        return kmalloc_caches[i];
 }
 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
@@ -1602,8 +1565,6 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep)
  */
 void __init kmem_cache_init(void)
 {
-        struct cache_sizes *sizes;
-        struct cache_names *names;
         int i;
         kmem_cache = &kmem_cache_boot;
@@ -1657,8 +1618,6 @@ void __init kmem_cache_init(void)
         list_add(&kmem_cache->list, &slab_caches);
         /* 2+3) create the kmalloc caches */
-        sizes = malloc_sizes;
-        names = cache_names;
         /*
          * Initialize the caches that provide memory for the array cache and the
@@ -1666,17 +1625,23 @@ void __init kmem_cache_init(void)
          * bug.
          */
-        sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
-                                        sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+        kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
+                                        kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
         if (INDEX_AC != INDEX_L3)
-                sizes[INDEX_L3].cs_cachep =
-                        create_kmalloc_cache(names[INDEX_L3].name,
-                                sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
+                kmalloc_caches[INDEX_L3] =
+                        create_kmalloc_cache("kmalloc-l3",
+                                kmalloc_size(INDEX_L3), ARCH_KMALLOC_FLAGS);
         slab_early_init = 0;
-        while (sizes->cs_size != ULONG_MAX) {
+        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+                size_t cs_size = kmalloc_size(i);
+                if (cs_size < KMALLOC_MIN_SIZE)
+                        continue;
+                if (!kmalloc_caches[i]) {
                 /*
                  * For performance, all the general caches are L1 aligned.
                  * This should be particularly beneficial on SMP boxes, as it
@@ -1684,17 +1649,15 @@ void __init kmem_cache_init(void)
                  * Note for systems short on memory removing the alignment will
                  * allow tighter packing of the smaller caches.
                  */
-                if (!sizes->cs_cachep)
-                        sizes->cs_cachep = create_kmalloc_cache(names->name,
-                                        sizes->cs_size, ARCH_KMALLOC_FLAGS);
+                        kmalloc_caches[i] = create_kmalloc_cache("kmalloc",
+                                                cs_size, ARCH_KMALLOC_FLAGS);
+                }
 #ifdef CONFIG_ZONE_DMA
-                sizes->cs_dmacachep = create_kmalloc_cache(
-                        names->name_dma, sizes->cs_size,
+                kmalloc_dma_caches[i] = create_kmalloc_cache(
+                        "kmalloc-dma", cs_size,
                         SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
-                sizes++;
-                names++;
         }
         /* 4) Replace the bootstrap head arrays */
         {
@@ -1713,17 +1676,16 @@ void __init kmem_cache_init(void)
                 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
-                BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+                BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
                        != &initarray_generic.cache);
-                memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+                memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
                        sizeof(struct arraycache_init));
                 /*
                  * Do not assume that spinlocks can be initialized via memcpy:
                  */
                 spin_lock_init(&ptr->lock);
-                malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-                    ptr;
+                kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
         }
         /* 5) Replace the bootstrap kmem_list3's */
         {
@@ -1732,17 +1694,39 @@ void __init kmem_cache_init(void)
                 for_each_online_node(nid) {
                         init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
-                        init_list(malloc_sizes[INDEX_AC].cs_cachep,
+                        init_list(kmalloc_caches[INDEX_AC],
                                   &initkmem_list3[SIZE_AC + nid], nid);
                         if (INDEX_AC != INDEX_L3) {
-                                init_list(malloc_sizes[INDEX_L3].cs_cachep,
+                                init_list(kmalloc_caches[INDEX_L3],
                                           &initkmem_list3[SIZE_L3 + nid], nid);
                         }
                 }
         }
         slab_state = UP;
+        /* Create the proper names */
+        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+                char *s;
+                struct kmem_cache *c = kmalloc_caches[i];
+                if (!c)
+                        continue;
+                s = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
+                BUG_ON(!s);
+                c->name = s;
+#ifdef CONFIG_ZONE_DMA
+                c = kmalloc_dma_caches[i];
+                BUG_ON(!c);
+                s = kasprintf(GFP_NOWAIT, "dma-kmalloc-%d", kmalloc_size(i));
+                BUG_ON(!s);
+                c->name = s;
+#endif
+        }
 }
 void __init kmem_cache_init_late(void)
@@ -2428,10 +2412,9 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                 size += BYTES_PER_WORD;
         }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-            && cachep->object_size > cache_line_size()
-            && ALIGN(size, cachep->align) < PAGE_SIZE) {
-                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+        if (size >= kmalloc_size(INDEX_L3 + 1)
+            && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+                cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
                 size = PAGE_SIZE;
         }
 #endif