Commit bbd4e305 authored by chenqiwu, committed by Linus Torvalds

mm/slub.c: replace kmem_cache->cpu_partial with wrapped APIs

The slub_cpu_partial() and slub_set_cpu_partial() APIs wrap accesses to
kmem_cache->cpu_partial.  Use these two APIs to replace the remaining
direct references to kmem_cache->cpu_partial in the slub code.
Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/1582079562-17980-1-git-send-email-qiwuchen55@gmail.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4c7ba22e
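
For context, the two wrappers live in include/linux/slub_def.h. The following is a sketch of their definitions, reconstructed from the kernel sources around this commit; exact whitespace and comments may differ:

/* Sketch of the wrappers in include/linux/slub_def.h (approximate). */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* The cpu_partial field exists in struct kmem_cache: access it directly. */
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
/* Field compiled out: reads evaluate to 0, writes expand to nothing. */
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

With CONFIG_SLUB_CPU_PARTIAL disabled, slub_cpu_partial(s) evaluates to 0 and slub_set_cpu_partial(s, n) expands to nothing, so call sites like those in the diff below stay valid without per-site #ifdefs.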
@@ -2282,7 +2282,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	if (oldpage) {
 		pobjects = oldpage->pobjects;
 		pages = oldpage->pages;
-		if (drain && pobjects > s->cpu_partial) {
+		if (drain && pobjects > slub_cpu_partial(s)) {
 			unsigned long flags;
 			/*
 			 * partial array is full. Move the existing
@@ -2307,7 +2307,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
 								!= oldpage);
-	if (unlikely(!s->cpu_partial)) {
+	if (unlikely(!slub_cpu_partial(s))) {
 		unsigned long flags;

 		local_irq_save(flags);
@@ -3512,15 +3512,15 @@ static void set_cpu_partial(struct kmem_cache *s)
 	 * 50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
-		s->cpu_partial = 0;
+		slub_set_cpu_partial(s, 0);
 	else if (s->size >= PAGE_SIZE)
-		s->cpu_partial = 2;
+		slub_set_cpu_partial(s, 2);
 	else if (s->size >= 1024)
-		s->cpu_partial = 6;
+		slub_set_cpu_partial(s, 6);
 	else if (s->size >= 256)
-		s->cpu_partial = 13;
+		slub_set_cpu_partial(s, 13);
 	else
-		s->cpu_partial = 30;
+		slub_set_cpu_partial(s, 30);
 #endif
 }