Commit f315e3fa authored by Joonsoo Kim, committed by Pekka Enberg

slab: restrict the number of objects in a slab

To prepare for implementing a byte sized index for managing the freelist
of a slab, we must restrict the number of objects in a slab to at most
256, since a byte can only represent 256 different values. Setting the
object size to a value greater than or equal to the newly introduced
SLAB_OBJ_MIN_SIZE ensures that a slab consisting of a single page holds
at most 256 objects.

If the page size is larger than 4096, this assumption no longer holds;
in that case, we fall back to a 2 byte index.

If the minimum kmalloc size is less than 16, we use it as the minimum
object size and give up on this optimization.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent e5c58dfd
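
To illustrate the arithmetic described in the message, here is a minimal
standalone sketch (not part of the patch; the 4096-byte page size and the
one-byte index range are taken from the commit message):

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed 2^12 byte page, as in the message */
#define BYTE_VALUES	256	/* one byte can index 2^8 distinct objects */

int main(void)
{
	/* Smallest object size that keeps a one-page slab at <= 256 objects */
	unsigned long min_size = PAGE_SIZE / BYTE_VALUES;	/* 4096 / 256 = 16 */

	printf("min object size: %lu, objects per page: %lu\n",
	       min_size, PAGE_SIZE / min_size);	/* prints 16 and 256 */
	return 0;
}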
include/linux/slab.h
@@ -201,6 +201,17 @@ struct kmem_cache {
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the
 * minimum object size and give up using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_SHIFT_LOW < 4 ? \
				(1 << KMALLOC_SHIFT_LOW) : 16)
#endif

#ifdef CONFIG_SLUB
...
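
As a quick check of how the new SLAB_OBJ_MIN_SIZE macro evaluates, a
standalone sketch (KMALLOC_SHIFT_LOW is hardcoded here to mimic an
architecture whose minimum kmalloc size is 8 bytes; it is not taken from
the patch):

#include <stdio.h>

#define KMALLOC_SHIFT_LOW	3	/* assumed: 2^3 = 8 byte minimum kmalloc */

#define SLAB_OBJ_MIN_SIZE	(KMALLOC_SHIFT_LOW < 4 ? \
				(1 << KMALLOC_SHIFT_LOW) : 16)

int main(void)
{
	/* 2^3 = 8 is below 16, so the minimum kmalloc size itself becomes
	 * the minimum object size and the byte sized index is given up
	 * (the 2 byte fallback is used instead). */
	printf("SLAB_OBJ_MIN_SIZE = %d\n", SLAB_OBJ_MIN_SIZE);	/* prints 8 */
	return 0;
}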
mm/slab.c
@@ -157,6 +157,17 @@
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif
#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
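
The hunk above picks the freelist index width at compile time. A
standalone sketch of that selection (PAGE_SIZE, BITS_PER_BYTE and
SLAB_OBJ_MIN_SIZE are hardcoded stand-ins for the kernel definitions):

#include <stdio.h>

#define BITS_PER_BYTE		8
#define PAGE_SIZE		4096	/* try 65536 to see the fallback */
#define SLAB_OBJ_MIN_SIZE	16

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((unsigned long)1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)

int main(void)
{
	/* 4096 >> 8 = 16 <= 16, so a one byte index suffices: at most 256
	 * objects per slab.  With 65536-byte pages, 65536 >> 8 = 256 > 16,
	 * and the two byte fallback allows up to 65536 objects. */
	printf("byte index: %d, max objects per slab: %lu\n",
	       FREELIST_BYTE_INDEX, SLAB_OBJ_MAX_NUM);
	return 0;
}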
@@ -2016,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
		if (!num)
			continue;
		/* Can't handle more objects than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;
		if (flags & CFLGS_OFF_SLAB) {
			/*
			 * Max number of objs-per-slab for caches which
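
A standalone sketch of the effect of this cap on the slab order search
(PAGE_SIZE, SLAB_OBJ_MAX_NUM and the object size are illustrative values,
and the loop is a simplification of calculate_slab_order):

#include <stdio.h>

#define PAGE_SIZE		4096
#define SLAB_OBJ_MAX_NUM	256	/* byte index: at most 2^8 objects */

int main(void)
{
	unsigned long size = 16;	/* object size at SLAB_OBJ_MIN_SIZE */
	int order;

	for (order = 0; order <= 3; order++) {
		unsigned long num = ((unsigned long)PAGE_SIZE << order) / size;

		/* Can't handle more objects than the index type can address,
		 * so stop growing the slab beyond that order. */
		if (num > SLAB_OBJ_MAX_NUM)
			break;
		printf("order %d: %lu objects\n", order, num);	/* order 0: 256 */
	}
	return 0;
}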
@@ -2258,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
		flags |= CFLGS_OFF_SLAB;

	size = ALIGN(size, cachep->align);
	/*
	 * We must restrict the number of objects in a slab to implement
	 * the byte sized index. See the comment on the SLAB_OBJ_MIN_SIZE
	 * definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
...
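
Finally, a standalone sketch of the size bump in __kmem_cache_create
(ALIGN mirrors the kernel macro; the 8-byte requested size and alignment
are made-up inputs, and FREELIST_BYTE_INDEX is fixed to 1 as it would be
on a 4096-byte page):

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SLAB_OBJ_MIN_SIZE	16
#define FREELIST_BYTE_INDEX	1	/* assumed: 4096-byte page */

int main(void)
{
	unsigned long size = 8, align = 8;	/* illustrative cache geometry */

	size = ALIGN(size, align);
	/* An 8-byte object would pack 512 objects into a 4096-byte page,
	 * which a one byte index cannot address; bump the size to the
	 * minimum that keeps the count at or below 256. */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, align);

	printf("object size used: %lu\n", size);	/* prints 16 */
	return 0;
}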