Commit fa417ab7 authored by Vlastimil Babka

mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc()

Continue reducing the irq disabled scope. Check for per-cpu partial slabs first
with irqs enabled and then recheck with irqs disabled before grabbing the slab
page. Mostly preparatory for the following patches.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 0b303fb4
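For illustration only (not part of the patch): the check/recheck pattern described above can be sketched as a self-contained userspace analogy. The struct, field and function names below are invented for the example, and a pthread mutex stands in for local_irq_save()/local_irq_restore(); the real code operates on the kmem_cache_cpu state inside ___slab_alloc().

/*
 * Sketch of the pattern: look at cached per-CPU state optimistically
 * without protection, then enter the "irq off" section only for the short
 * window in which the state is re-validated and consumed, because another
 * context (an IRQ handler in the kernel case) may have changed it while
 * we were unprotected.
 */
#include <pthread.h>
#include <stddef.h>

struct cpu_cache {
	pthread_mutex_t lock;	/* stand-in for local_irq_save()/restore() */
	void *page;		/* currently installed slab page           */
	void *partial;		/* cached partial slab                     */
};

static void *grab_partial(struct cpu_cache *c)
{
	void *page = NULL;

	if (!c->partial)		/* cheap check, no protection held */
		return NULL;

	pthread_mutex_lock(&c->lock);	/* "disable irqs"                  */
	if (!c->page && c->partial) {	/* re-validate: nothing changed?   */
		page = c->partial;	/* commit to the cached slab       */
		c->partial = NULL;
		c->page = page;
	}
	pthread_mutex_unlock(&c->lock);
	return page;			/* NULL: state changed, caller retries */
}

A NULL return corresponds to the retry paths in the patch (goto reread_page / goto new_objects): the caller goes back and re-reads the per-CPU state rather than proceeding with stale information.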
@@ -2706,11 +2706,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
-		local_irq_save(flags);
-		if (unlikely(c->page)) {
-			local_irq_restore(flags);
-			goto reread_page;
-		}
 		goto new_slab;
 	}
 redo:
@@ -2751,6 +2746,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (!freelist) {
 		c->page = NULL;
+		local_irq_restore(flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
 	}
@@ -2780,12 +2776,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto reread_page;
 	}
 	deactivate_slab(s, page, c->freelist, c);
+	local_irq_restore(flags);
 new_slab:
-	lockdep_assert_irqs_disabled();
 	if (slub_percpu_partial(c)) {
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
+		if (unlikely(!slub_percpu_partial(c)))
+			goto new_objects; /* stolen by an IRQ handler */
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
 		local_irq_restore(flags);
@@ -2793,6 +2796,16 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto redo;
 	}
+	local_irq_save(flags);
+	if (unlikely(c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+new_objects:
+	lockdep_assert_irqs_disabled();
 	freelist = get_partial(s, gfpflags, node, &page);
 	if (freelist) {
 		c->page = page;
@@ -2825,16 +2838,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 check_new_page:
 	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr))
+		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
+			c->page = NULL;
+			local_irq_restore(flags);
 			goto new_slab;
-		else
+		} else {
 			/*
 			 * For debug case, we don't load freelist so that all
 			 * allocations go through alloc_debug_processing()
 			 */
 			goto return_single;
+		}
 	}
 	if (unlikely(!pfmemalloc_match(page, gfpflags)))
 		/*
...