Commit 03ec0ed5 authored by Jesper Dangaard Brouer, committed by Linus Torvalds

slub: fix kmem cgroup bug in kmem_cache_alloc_bulk

The call slab_pre_alloc_hook() interacts with kmemcg and must not be
called several times inside the bulk alloc for loop, due to the call to
memcg_kmem_get_cache().

This would result in hitting the VM_BUG_ON in __memcg_kmem_get_cache().
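
To make the change concrete, here is a simplified sketch of the call
pattern (condensed from the diff below, not the actual mm/slub.c code;
alloc_one() is a made-up stand-in for the per-object freelist handling):

	/* Broken: hooks invoked per object inside the bulk loop */
	for (i = 0; i < size; i++) {
		s = slab_pre_alloc_hook(s, flags);	/* memcg_kmem_get_cache() */
		if (unlikely(!s))
			goto error;
		p[i] = alloc_one(s);
		slab_post_alloc_hook(s, flags, p[i]);	/* old single-object form */
	}

	/* Fixed: one pre/post pair brackets the whole bulk allocation */
	s = slab_pre_alloc_hook(s, flags);
	if (unlikely(!s))
		return false;
	for (i = 0; i < size; i++)
		p[i] = alloc_one(s);
	slab_post_alloc_hook(s, flags, size, p);	/* new array form */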

As suggested by Vladimir Davydov, change slab_post_alloc_hook() so that
it can handle an array of objects.

A subtle detail: the loop iterator "i" in slab_post_alloc_hook() must
have the same type (size_t) as the size argument.  This makes it easier
for the compiler to see that it can remove the loop when all the debug
statements inside the loop evaluate to nothing.  Note, this is only an
issue because the kernel is compiled with the GCC option
-fno-strict-overflow.
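
The effect can be seen in a stand-alone sketch (a userspace stand-in,
not kernel code; post_hook() and debug_hook() are made-up names that
mirror the shape of slab_post_alloc_hook() with the debug hooks
configured out):

	#include <stddef.h>

	/* stands in for kmemcheck/kmemleak/kasan hooks compiled to nothing */
	#define debug_hook(obj) do { } while (0)

	static void post_hook(size_t size, void **p)
	{
		size_t i;	/* same type as "size", not int */

		for (i = 0; i < size; i++)
			debug_hook(p[i]);
		/* empty loop body: with a size_t iterator the compiler can
		 * drop the loop entirely, even with -fno-strict-overflow */
	}

One way to verify is to compile such a file with
"gcc -O2 -fno-strict-overflow -S" and check that no loop is emitted for
post_hook().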

In slab_alloc_node() the compiler inlines and optimizes the invocation
of slab_post_alloc_hook(s, flags, 1, &object) by removing the loop and
accessing the object directly.
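
That is, the call site reduces to roughly the following (a sketch of
the intended result after inlining, not actual compiler output):

	slab_post_alloc_hook(s, gfpflags, 1, &object);

	/* ...with size == 1 this boils down to the hook body applied
	 * directly to "object" (after gfpflags is masked with
	 * gfp_allowed_mask): */
	kmemcheck_slab_alloc(s, gfpflags, object, slab_ksize(s));
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, gfpflags);
	kasan_slab_alloc(s, object);
	memcg_kmem_put_cache(s);
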
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reported-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d0ecd894
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1292,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
 {
+	size_t i;
+
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
-	memcg_kmem_put_cache(s);
-	kasan_slab_alloc(s, object);
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
+	memcg_kmem_put_cache(s);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -2475,7 +2482,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
-	void **object;
+	void *object;
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;
@@ -2554,7 +2561,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, object);
+	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
 }
@@ -2904,6 +2911,10 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	struct kmem_cache_cpu *c;
 	int i;
 
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, flags);
+	if (unlikely(!s))
+		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
@@ -2928,17 +2939,8 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
-
-		/* kmem_cache debug support */
-		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s))
-			goto error;
-
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
-
-		/* kmem_cache debug support */
-		slab_post_alloc_hook(s, flags, object);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
@@ -2951,11 +2953,13 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			memset(p[j], 0, s->object_size);
 	}
 
+	/* memcg and kmem_cache debug support */
+	slab_post_alloc_hook(s, flags, size, p);
 	return true;
-
 error:
-	__kmem_cache_free_bulk(s, i, p);
 	local_irq_enable();
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
 	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
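
For reference, a minimal hypothetical caller of the bulk API as it
stands after this patch could look like the sketch below; example_cache
and NR_OBJS are made-up names, and note that on failure the error path
above has already freed any partially allocated objects, so the caller
must not free them again:

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16	/* arbitrary example batch size */

static int example_bulk_user(struct kmem_cache *example_cache)
{
	void *objs[NR_OBJS];
	size_t i;

	if (!kmem_cache_alloc_bulk(example_cache, GFP_KERNEL, NR_OBJS, objs))
		return -ENOMEM;	/* nothing was handed out to us */

	for (i = 0; i < NR_OBJS; i++) {
		/* ... initialise and use objs[i] ... */
	}

	kmem_cache_free_bulk(example_cache, NR_OBJS, objs);
	return 0;
}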