Commit 376bf125 authored by Jesper Dangaard Brouer, committed by Linus Torvalds

slub: clean up code for kmem cgroup support to kmem_cache_free_bulk

This change is primarily an attempt to make it easier to recognize the
optimizations the compiler performs in case CONFIG_MEMCG_KMEM is not
enabled.

Performance-wise, even when CONFIG_MEMCG_KMEM is compiled in, the
overhead is zero.  This is because, as long as no process has enabled
kmem cgroups accounting, the assignment is replaced by asm NOP
operations.  This is possible because memcg_kmem_enabled() uses a
static_key_false() construct.
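
As an illustration, the pattern looks roughly like the following sketch;
this is simplified for exposition and is not the verbatim mm/slab.h
helper:

	/* Simplified sketch, not the exact mm/slab.h code */
	static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s,
							void *x)
	{
		/*
		 * memcg_kmem_enabled() wraps static_key_false(): while no
		 * process has enabled kmem cgroup accounting, this branch
		 * is patched to a NOP at runtime, so only the trivial
		 * "return s" path remains.
		 */
		if (!memcg_kmem_enabled())
			return s;

		/* Slow path: find the per-memcg cache owning the object */
		return virt_to_head_page(x)->slab_cache;
	}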

It also helps readability, as it avoids accessing the p[] array via
p[size - 1], which exposed that the array is processed backwards inside
the helper function build_detached_freelist().

Lastly, this also makes the code more robust in error cases, such as
passing NULL pointers in the array; that case was handled before commit
03374518 ("slub: add missing kmem cgroup support to
kmem_cache_free_bulk") and is handled again with this change (see the
condensed scan sketched below).

Fixes: 03374518 ("slub: add missing kmem cgroup support to kmem_cache_free_bulk")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dec63a4d
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2815,6 +2815,7 @@ struct detached_freelist {
 	void *tail;
 	void *freelist;
 	int cnt;
+	struct kmem_cache *s;
 };
 
 /*
@@ -2829,8 +2830,9 @@ struct detached_freelist {
  * synchronization primitive. Look ahead in the array is limited due
  * to performance reasons.
  */
-static int build_detached_freelist(struct kmem_cache *s, size_t size,
-				   void **p, struct detached_freelist *df)
+static inline
+int build_detached_freelist(struct kmem_cache *s, size_t size,
+			    void **p, struct detached_freelist *df)
 {
 	size_t first_skipped_index = 0;
 	int lookahead = 3;
@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!object)
 		return 0;
 
+	/* Support for memcg, compiler can optimize this out */
+	df->s = cache_from_obj(s, object);
+
 	/* Start new detached freelist */
-	set_freepointer(s, object, NULL);
+	set_freepointer(df->s, object, NULL);
 	df->page = virt_to_head_page(object);
 	df->tail = object;
 	df->freelist = object;
@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
 		/* df->page is always set at this point */
 		if (df->page == virt_to_head_page(object)) {
 			/* Opportunity build freelist */
-			set_freepointer(s, object, df->freelist);
+			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
 			df->cnt++;
 			p[size] = NULL; /* mark object processed */
@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
 	return first_skipped_index;
 }
 
 /* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 {
 	if (WARN_ON(!size))
 		return;
 
 	do {
 		struct detached_freelist df;
-		struct kmem_cache *s;
-
-		/* Support for memcg */
-		s = cache_from_obj(orig_s, p[size - 1]);
 
 		size = build_detached_freelist(s, size, p, &df);
 		if (unlikely(!df.page))
 			continue;
 
-		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
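
For context, a hypothetical caller of the bulk API might look like the
sketch below; bulk_demo() and the array size are illustrative only, and
the sketch assumes kmem_cache_alloc_bulk() returning 0 on failure, as it
does in kernels of this era:

	/* Hypothetical caller sketching bulk alloc/free usage */
	static void bulk_demo(struct kmem_cache *cachep)
	{
		void *objs[16];

		if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL,
					   ARRAY_SIZE(objs), objs))
			return;	/* bulk allocation failed */

		/* ... use the objects ... */

		/* interrupts must be enabled when calling this function */
		kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
	}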