Commit 213eeb9f authored by Christoph Lameter, committed by Pekka Enberg

slub: Extract get_freelist from __slab_alloc

get_freelist retrieves free objects from the page freelist (put there by remote
frees) or deactivates a slab page if no more objects are available.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 73736e03
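
The pattern get_freelist relies on can be modeled outside the kernel: snapshot the freelist pointer and the packed counters together, compute the new state, and commit both words with a single compare-and-swap, retrying whenever a remote free changed either word in between. The sketch below is a simplified stand-in, not kernel code: slab_state, take_freelist, and the demo in main are invented names, and the two words are packed into one C11 _Atomic struct, where the kernel instead uses cmpxchg_double_slab() on the separate page->freelist and page->counters fields.

#include <stdatomic.h>
#include <stdio.h>

/* Two words that must change together, packed into one atomic struct. */
struct slab_state {
	void *freelist;		/* first free object, NULL when exhausted */
	unsigned inuse;		/* objects handed out                     */
	unsigned frozen;	/* slab reserved for one cpu?             */
};

/*
 * Mirror of the get_freelist() loop: claim every object at once, keep
 * the slab frozen only if the freelist was non-empty, and retry if a
 * concurrent free raced between the snapshot and the commit.
 */
static void *take_freelist(_Atomic struct slab_state *slab, unsigned objects)
{
	struct slab_state old = atomic_load(slab), new;

	do {
		new.freelist = NULL;		/* per cpu side takes the chain  */
		new.inuse = objects;		/* all objects now accounted for */
		new.frozen = old.freelist != NULL;
		/* on failure, old is refreshed with the current state */
	} while (!atomic_compare_exchange_weak(slab, &old, new));

	return old.freelist;	/* NULL: slab was exhausted and is now unfrozen */
}

int main(void)
{
	static int objs[4];	/* stand-in storage for "objects" */
	_Atomic struct slab_state slab;

	atomic_store(&slab, (struct slab_state){ .freelist = &objs[0],
						 .inuse = 0, .frozen = 1 });
	printf("freelist head: %p\n", take_freelist(&slab, 4));
	return 0;
}

The point of the double-width update is visible in the loop: nothing is locked, yet the transfer of the whole freelist and the frozen/inuse bookkeeping appear atomic to every remote free.
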
@@ -2126,6 +2126,37 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 	return object;
 }
 
+/*
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+	struct page new;
+	unsigned long counters;
+	void *freelist;
+
+	do {
+		freelist = page->freelist;
+		counters = page->counters;
+		new.counters = counters;
+		VM_BUG_ON(!new.frozen);
+
+		new.inuse = page->objects;
+		new.frozen = freelist != NULL;
+
+	} while (!cmpxchg_double_slab(s, page,
+		freelist, counters,
+		NULL, new.counters,
+		"get_freelist"));
+
+	return freelist;
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -2147,8 +2178,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	unsigned long flags;
-	struct page new;
-	unsigned long counters;
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2176,29 +2205,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	stat(s, ALLOC_SLOWPATH);
 
-	do {
-		object = c->page->freelist;
-		counters = c->page->counters;
-		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
-
-		/*
-		 * If there is no object left then we use this loop to
-		 * deactivate the slab which is simple since no objects
-		 * are left in the slab and therefore we do not need to
-		 * put the page back onto the partial list.
-		 *
-		 * If there are objects left then we retrieve them
-		 * and use them to refill the per cpu queue.
-		 */
-		new.inuse = c->page->objects;
-		new.frozen = object != NULL;
-
-	} while (!__cmpxchg_double_slab(s, c->page,
-			object, counters,
-			NULL, new.counters,
-			"__slab_alloc"));
+	object = get_freelist(s, c->page);
 	if (!object) {
 		c->page = NULL;
...
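
The removed comment in __slab_alloc still describes the two outcomes, now hidden behind get_freelist's return value: with objects left, the whole chain moves to the per cpu freelist in one step (new.inuse = page->objects claims every object) and the page stays frozen; with none left, the same cmpxchg clears the frozen bit, so the page is deactivated without ever going back onto the partial list.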