Commit ed4cd17e authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/sl[au]b: introduce common alloc/free functions without tracepoint

To unify the kmalloc functions in a later patch, introduce common alloc/free
functions that do not have tracepoints.
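
As a rough illustration (not part of this patch; the function below and its
details are only a sketch), a unified kmalloc slow path in a later patch could
build on the tracepoint-free allocation helper and leave tracing to the
outermost entry points:

  /* Illustrative sketch only -- not added by this patch. */
  static __always_inline
  void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
                          unsigned long caller)
  {
          struct kmem_cache *s;
          void *ret;

          if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                  return kmalloc_large_node_notrace(size, flags, node);

          s = kmalloc_slab(size, flags);
          if (unlikely(ZERO_OR_NULL_PTR(s)))
                  return s;

          /* No tracepoint here; callers emit trace_kmalloc() themselves. */
          ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
          ret = kasan_kmalloc(s, ret, size, flags);
          return ret;
  }
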
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent d6a71648
mm/slab.c
@@ -3560,6 +3560,14 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+			      int nodeid, size_t orig_size,
+			      unsigned long caller)
+{
+	return slab_alloc_node(cachep, NULL, flags, nodeid,
+			       orig_size, caller);
+}
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 				  gfp_t flags,
@@ -3645,6 +3653,26 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+static __always_inline
+void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp,
+			  unsigned long caller)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	debug_check_no_locks_freed(objp, cachep->object_size);
+	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(objp, cachep->object_size);
+	__cache_free(cachep, objp, caller);
+	local_irq_restore(flags);
+}
+
+void __kmem_cache_free(struct kmem_cache *cachep, void *objp,
+		       unsigned long caller)
+{
+	__do_kmem_cache_free(cachep, objp, caller);
+}
+
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
@@ -3655,18 +3683,12 @@ EXPORT_SYMBOL(__kmalloc);
  */
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
-	unsigned long flags;
 	cachep = cache_from_obj(cachep, objp);
 	if (!cachep)
 		return;
 
 	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
-	local_irq_save(flags);
-	debug_check_no_locks_freed(objp, cachep->object_size);
-	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, _RET_IP_);
-	local_irq_restore(flags);
+	__do_kmem_cache_free(cachep, objp, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
mm/slab.h
@@ -273,6 +273,11 @@ void create_kmalloc_caches(slab_flags_t);
 /* Find the kmalloc slab corresponding for a certain size */
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 
+void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
+			      int node, size_t orig_size,
+			      unsigned long caller);
+void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
+
 #endif
 
 void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);
mm/slub.c
@@ -3262,6 +3262,14 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_lru);
 
+void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
+			      int node, size_t orig_size,
+			      unsigned long caller)
+{
+	return slab_alloc_node(s, NULL, gfpflags, node,
+			       caller, orig_size);
+}
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
@@ -3526,6 +3534,11 @@ void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 }
 #endif
 
+void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
+{
+	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
+}
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
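
For illustration only (not part of this commit; the entry-point name below is
hypothetical), a later common free path could emit its tracepoint once at the
top level and then delegate to the tracepoint-free helper:

  /* Illustrative sketch only -- trace at the entry point, then delegate. */
  void kfree_sketch(const void *object, unsigned long caller)
  {
          struct folio *folio;
          struct slab *slab;

          trace_kfree(caller, object);

          if (unlikely(ZERO_OR_NULL_PTR(object)))
                  return;

          folio = virt_to_folio(object);
          if (unlikely(!folio_test_slab(folio))) {
                  /* Large kmalloc buffers bypass the slab caches entirely. */
                  free_large_kmalloc(folio, (void *)object);
                  return;
          }

          slab = folio_slab(folio);
          __kmem_cache_free(slab->slab_cache, (void *)object, caller);
  }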