Commit 588f8ba9 authored by Thomas Gleixner, committed by Linus Torvalds

mm/slub: move slab initialization into irq enabled region

Initializing a new slab can introduce rather large latencies because most
of the initialization always runs with interrupts disabled.

There is no point in doing so.  The newly allocated slab is not visible
yet, so there is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab() so
that, for all allocations with __GFP_WAIT set, interrupts are enabled.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3eed034d
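
The pattern behind the change can be illustrated with a small userspace sketch (illustration only, not the kernel code): a pthread mutex stands in for the interrupts-disabled region, and the expensive allocation and initialization of a not-yet-published object runs with the lock dropped, analogous to what allocate_slab() now does for __GFP_WAIT allocations. All names below (struct slab, cache_lock, refill_cache) are hypothetical.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct slab {
        struct slab *next;
        char payload[4096];
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct slab *cache_head;         /* protected by cache_lock */

/* Called with cache_lock held; returns with it held again. */
static struct slab *refill_cache(void)
{
        struct slab *s;

        /*
         * Drop the lock (analogous to local_irq_enable()): the new slab
         * is not visible to any other thread yet, so the expensive
         * allocation and initialization need no protection.
         */
        pthread_mutex_unlock(&cache_lock);

        s = malloc(sizeof(*s));
        if (s)
                memset(s->payload, 0, sizeof(s->payload));

        /*
         * Retake the lock (analogous to local_irq_disable()) only for
         * the step that publishes the slab to other threads.
         */
        pthread_mutex_lock(&cache_lock);
        if (!s)
                return NULL;

        s->next = cache_head;
        cache_head = s;
        return s;
}

int main(void)
{
        pthread_mutex_lock(&cache_lock);
        struct slab *s = refill_cache();
        pthread_mutex_unlock(&cache_lock);
        return s ? 0 : 1;
}

Build with cc -pthread; the point is only the ordering: nothing that touches shared state happens while the lock is dropped.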
mm/slub.c
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
         kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+                                void *object)
+{
+        setup_object_debug(s, page, object);
+        if (unlikely(s->ctor)) {
+                kasan_unpoison_object_data(s, object);
+                s->ctor(object);
+                kasan_poison_object_data(s, object);
+        }
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         struct page *page;
         struct kmem_cache_order_objects oo = s->oo;
         gfp_t alloc_gfp;
+        void *start, *p;
+        int idx, order;
 
         flags &= gfp_allowed_mask;
 
@@ -1359,13 +1372,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                 * Try a lower order alloc if possible
                 */
                page = alloc_slab_page(s, alloc_gfp, node, oo);
-
-               if (page)
-                       stat(s, ORDER_FALLBACK);
+               if (unlikely(!page))
+                       goto out;
+               stat(s, ORDER_FALLBACK);
        }
 
-       if (kmemcheck_enabled && page
-               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+       if (kmemcheck_enabled &&
+           !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                int pages = 1 << oo_order(oo);
 
                kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1393,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                        kmemcheck_mark_unallocated_pages(page, pages);
        }
 
-       if (flags & __GFP_WAIT)
-               local_irq_disable();
-       if (!page)
-               return NULL;
-
        page->objects = oo_objects(oo);
-       mod_zone_page_state(page_zone(page),
-               (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-               NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-               1 << oo_order(oo));
-
-       return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-                               void *object)
-{
-       setup_object_debug(s, page, object);
-       if (unlikely(s->ctor)) {
-               kasan_unpoison_object_data(s, object);
-               s->ctor(object);
-               kasan_poison_object_data(s, object);
-       }
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-       struct page *page;
-       void *start;
-       void *p;
-       int order;
-       int idx;
-
-       if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-               pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-               BUG();
-       }
-
-       page = allocate_slab(s,
-               flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-       if (!page)
-               goto out;
 
        order = compound_order(page);
-       inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab_cache = s;
        __SetPageSlab(page);
        if (page_is_pfmemalloc(page))
@@ -1448,10 +1419,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        page->freelist = start;
        page->inuse = page->objects;
        page->frozen = 1;
+
 out:
+       if (flags & __GFP_WAIT)
+               local_irq_disable();
+       if (!page)
+               return NULL;
+
+       mod_zone_page_state(page_zone(page),
+               (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+               NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+               1 << oo_order(oo));
+
+       inc_slabs_node(s, page_to_nid(page), page->objects);
+
        return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+       if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+               pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+               BUG();
+       }
+
+       return allocate_slab(s,
+               flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
        int order = compound_order(page);