Commit 3217fd9b authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: make criteria for off slab determination robust and simple

For a cache to use off-slab freelists, several constraints must hold to avoid the
bootstrapping problem and recursive calls.  These can be avoided differently, by
simply checking that the corresponding kmalloc cache is ready and is not itself
off-slab.  This is more robust, because a static size check can be invalidated by
a change in cache size or by the architecture type, whereas a dynamic check
cannot.

One check, 'freelist_cache->size > cachep->size / 2', is added to verify that
going off-slab actually has enough benefit, because there is now no size
constraint ensuring a sufficient advantage when off-slab is selected.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f3a3c320
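
The program below is a minimal, self-contained sketch (plain userspace C, not kernel code) of the selection criteria this patch introduces in calculate_slab_order(): a candidate order only keeps its freelist off-slab if a kmalloc cache of the right size is already available and the freelist allocation costs less than half of one object. The mock size-class table and the helper names (lookup_kmalloc_size, should_use_off_slab) are illustrative assumptions, not kernel APIs, and the "freelist cache must not itself be off-slab" check cannot be modeled here.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Mock of the power-of-two kmalloc size classes assumed to be ready. */
static const size_t kmalloc_classes[] = { 8, 16, 32, 64, 128, 256, 512, 1024 };

/* Illustrative stand-in for kmalloc_slab(): smallest class that fits size,
 * or 0 if no suitable kmalloc cache exists yet. */
static size_t lookup_kmalloc_size(size_t size)
{
	for (size_t i = 0; i < sizeof(kmalloc_classes) / sizeof(kmalloc_classes[0]); i++)
		if (kmalloc_classes[i] >= size)
			return kmalloc_classes[i];
	return 0;
}

/*
 * Hypothetical helper mirroring the new checks in calculate_slab_order():
 *  - the kmalloc cache backing the freelist must already exist;
 *  - the freelist cache must cost less than half of one object,
 *    otherwise off-slab management has no real benefit.
 */
static bool should_use_off_slab(size_t obj_size, unsigned int num,
				size_t freelist_idx_size)
{
	size_t freelist_size = num * freelist_idx_size;
	size_t freelist_cache_size = lookup_kmalloc_size(freelist_size);

	if (!freelist_cache_size)
		return false;	/* corresponding kmalloc cache not ready */

	/* check if off slab has enough benefit */
	if (freelist_cache_size > obj_size / 2)
		return false;

	return true;
}

int main(void)
{
	/* 512-byte objects, 32 per slab: 32-byte freelist is cheap -> off-slab */
	printf("off-slab? %d\n", should_use_off_slab(512, 32, 1));
	/* 32-byte objects, 128 per slab: freelist costs more than half an object */
	printf("off-slab? %d\n", should_use_off_slab(32, 128, 1));
	return 0;
}
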
@@ -272,7 +272,6 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
-#define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 5, KMALLOC_MIN_SIZE + 1))
 #define BATCHREFILL_LIMIT	16
 /*
@@ -1879,7 +1878,6 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 				size_t size, unsigned long flags)
 {
-	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
@@ -1896,16 +1894,24 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 			break;
 		if (flags & CFLGS_OFF_SLAB) {
+			struct kmem_cache *freelist_cache;
+			size_t freelist_size;
+
+			freelist_size = num * sizeof(freelist_idx_t);
+			freelist_cache = kmalloc_slab(freelist_size, 0u);
+			if (!freelist_cache)
+				continue;
+
 			/*
-			 * Max number of objs-per-slab for caches which
-			 * use off-slab slabs. Needed to avoid a possible
-			 * looping condition in cache_grow().
+			 * Needed to avoid possible looping condition
+			 * in cache_grow()
 			 */
-			offslab_limit = size;
-			offslab_limit /= sizeof(freelist_idx_t);
+			if (OFF_SLAB(freelist_cache))
+				continue;
-			if (num > offslab_limit)
-				break;
+			/* check if off slab has enough benefit */
+			if (freelist_cache->size > cachep->size / 2)
+				continue;
 		}
 		/* Found something acceptable - save it away */
@@ -2031,17 +2037,9 @@ static bool set_off_slab_cache(struct kmem_cache *cachep,
 	cachep->num = 0;
 	/*
-	 * Determine if the slab management is 'on' or 'off' slab.
-	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on. Always use on-slab management when
-	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
+	 * Always use on-slab management when SLAB_NOLEAKTRACE
+	 * to avoid recursive calls into kmemleak.
 	 */
-	if (size < OFF_SLAB_MIN_SIZE)
-		return false;
-	if (slab_early_init)
-		return false;
 	if (flags & SLAB_NOLEAKTRACE)
 		return false;
@@ -2205,7 +2203,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * sized slab is initialized in current slab initialization sequence.
 	 */
 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
-		!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
 		size >= 256 && cachep->object_size > cache_line_size()) {
 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
@@ -2254,14 +2251,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (OFF_SLAB(cachep)) {
 		cachep->freelist_cache =
 			kmalloc_slab(cachep->freelist_size, 0u);
-		/*
-		 * This is a possibility for one of the kmalloc_{dma,}_caches.
-		 * But since we go off slab only for object size greater than
-		 * OFF_SLAB_MIN_SIZE, and kmalloc_{dma,}_caches get created
-		 * in ascending order,this should not happen at all.
-		 * But leave a BUG_ON for some lucky dude.
-		 */
-		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
 	}
 	err = setup_cpu_cache(cachep, gfp);