Commit 158e319b authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: clean up cache type determination

The current cache type determination code is open-coded and hard to follow.
A following patch will introduce one more cache type, which would make the
code even more complex. So, before that happens, this patch abstracts the
logic into helper functions.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 832a15d2
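Before the diff: a rough, self-contained C sketch of the on/off-slab decision that the new set_off_slab_cache()/set_on_slab_cache() helpers (introduced below) encode, for readers who want to play with the packing arithmetic outside the kernel. Everything in it is a simplifying assumption, not kernel code: one page per slab, a 64-byte cache line, one-byte freelist indices, and a made-up 128-byte off-slab threshold; the real calculate_slab_order() searches page orders and applies further constraints (alignment, OFF_SLAB_MIN_SIZE, slab_early_init, SLAB_NOLEAKTRACE).

/*
 * Toy model (NOT kernel code) of the decision the two new helpers encode.
 * All names and constants here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_BYTES	4096u
#define LINE_BYTES	64u			/* toy colour_off */
#define IDX_BYTES	1u			/* toy freelist_idx_t size */
#define OFF_SLAB_MIN	(PAGE_BYTES >> 5)	/* toy threshold: 128 bytes */

struct toy_cache {
	size_t num;	/* objects per slab */
	size_t colour;	/* distinct colour offsets */
};

/* Pack objects into one page; the freelist (one index per object)
 * shares the page unless off_slab.  Returns the leftover bytes. */
static size_t toy_order(struct toy_cache *c, size_t size, bool off_slab)
{
	size_t per_obj = size + (off_slab ? 0 : IDX_BYTES);

	c->num = PAGE_BYTES / per_obj;
	return PAGE_BYTES - c->num * per_obj;
}

/* Mirrors set_off_slab_cache(): small objects stay on-slab; and if the
 * off-slab leftover could hold the freelist anyway, prefer on-slab. */
static bool toy_set_off_slab(struct toy_cache *c, size_t size)
{
	size_t left;

	if (size < OFF_SLAB_MIN)
		return false;

	left = toy_order(c, size, true);
	if (!c->num)
		return false;
	if (left >= c->num * IDX_BYTES)
		return false;

	c->colour = left / LINE_BYTES;
	return true;
}

/* Mirrors set_on_slab_cache(): the leftover becomes colouring slack. */
static bool toy_set_on_slab(struct toy_cache *c, size_t size)
{
	size_t left = toy_order(c, size, false);

	if (!c->num)
		return false;

	c->colour = left / LINE_BYTES;
	return true;
}

int main(void)
{
	size_t sizes[] = { 100, 700, 2048, 5000 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		struct toy_cache c = { 0, 0 };
		const char *kind = toy_set_off_slab(&c, sizes[i]) ? "off-slab"
				 : toy_set_on_slab(&c, sizes[i]) ? "on-slab"
				 : "too big";

		printf("size %4zu: %-8s num=%zu colour=%zu\n",
		       sizes[i], kind, c.num, c.colour);
	}
	return 0;
}

With these toy numbers, 100- and 700-byte objects stay on-slab (the 700-byte case because its 596 leftover bytes could hold the 5-entry freelist), 2048-byte objects go off-slab, and 5000-byte objects do not fit at all.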
@@ -2023,6 +2023,64 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 	return cachep;
 }
 
+static bool set_off_slab_cache(struct kmem_cache *cachep,
+			size_t size, unsigned long flags)
+{
+	size_t left;
+
+	cachep->num = 0;
+
+	/*
+	 * Determine if the slab management is 'on' or 'off' slab.
+	 * (bootstrapping cannot cope with offslab caches so don't do
+	 * it too early on. Always use on-slab management when
+	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
+	 */
+	if (size < OFF_SLAB_MIN_SIZE)
+		return false;
+
+	if (slab_early_init)
+		return false;
+
+	if (flags & SLAB_NOLEAKTRACE)
+		return false;
+
+	/*
+	 * Size is large, assume best to place the slab management obj
+	 * off-slab (should allow better packing of objs).
+	 */
+	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
+	if (!cachep->num)
+		return false;
+
+	/*
+	 * If the slab has been placed off-slab, and we have enough space then
+	 * move it on-slab. This is at the expense of any extra colouring.
+	 */
+	if (left >= cachep->num * sizeof(freelist_idx_t))
+		return false;
+
+	cachep->colour = left / cachep->colour_off;
+
+	return true;
+}
+
+static bool set_on_slab_cache(struct kmem_cache *cachep,
+			size_t size, unsigned long flags)
+{
+	size_t left;
+
+	cachep->num = 0;
+
+	left = calculate_slab_order(cachep, size, flags);
+	if (!cachep->num)
+		return false;
+
+	cachep->colour = left / cachep->colour_off;
+
+	return true;
+}
+
 /**
  * __kmem_cache_create - Create a cache.
  * @cachep: cache management descriptor
@@ -2047,7 +2105,6 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-	size_t left_over, freelist_size;
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
@@ -2098,6 +2155,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * 4) Store it.
 	 */
 	cachep->align = ralign;
+	cachep->colour_off = cache_line_size();
+	/* Offset must be a multiple of the alignment. */
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
@@ -2152,43 +2213,18 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	}
 #endif
 
-	/*
-	 * Determine if the slab management is 'on' or 'off' slab.
-	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on. Always use on-slab management when
-	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
-	 */
-	if (size >= OFF_SLAB_MIN_SIZE && !slab_early_init &&
-	    !(flags & SLAB_NOLEAKTRACE)) {
-		/*
-		 * Size is large, assume best to place the slab management obj
-		 * off-slab (should allow better packing of objs).
-		 */
+	if (set_off_slab_cache(cachep, size, flags)) {
 		flags |= CFLGS_OFF_SLAB;
+		goto done;
 	}
 
-	left_over = calculate_slab_order(cachep, size, flags);
-
-	if (!cachep->num)
-		return -E2BIG;
-
-	freelist_size = cachep->num * sizeof(freelist_idx_t);
-
-	/*
-	 * If the slab has been placed off-slab, and we have enough space then
-	 * move it on-slab. This is at the expense of any extra colouring.
-	 */
-	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
-		flags &= ~CFLGS_OFF_SLAB;
-		left_over -= freelist_size;
-	}
-
-	cachep->colour_off = cache_line_size();
-	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < cachep->align)
-		cachep->colour_off = cachep->align;
-	cachep->colour = left_over / cachep->colour_off;
-	cachep->freelist_size = freelist_size;
+	if (set_on_slab_cache(cachep, size, flags))
+		goto done;
+
+	return -E2BIG;
+done:
+	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
 	cachep->flags = flags;
 	cachep->allocflags = __GFP_COMP;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
@@ -2209,7 +2245,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 #endif
 
 	if (OFF_SLAB(cachep)) {
-		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
+		cachep->freelist_cache =
+			kmalloc_slab(cachep->freelist_size, 0u);
 		/*
 		 * This is a possibility for one of the kmalloc_{dma,}_caches.
 		 * But since we go off slab only for object size greater than
...
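A worked example of the colouring arithmetic used by both helpers in the diff (cachep->colour = left / cachep->colour_off), with illustrative numbers rather than anything taken from the patch: assume a 4096-byte slab, a 64-byte cache line, one-byte freelist indices, and 700-byte objects kept on-slab. Then

	num    = 4096 / (700 + 1) = 5 objects per slab
	left   = 4096 - 5 * 701   = 591 leftover bytes
	colour = 591 / 64         = 9 colour offsets

so successive slabs place their first object at offsets 0, 64, ..., 512 before the cycle repeats, spreading objects from different slabs across cache sets.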