Commit ff4fcd01 authored by Ezequiel Garcia, committed by Pekka Enberg

mm, slab: Remove silly function slab_buffer_size()

This function is seldom used and can simply be replaced with cachep->size.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 90f2cbbc
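For context, the helper being removed did nothing more than return cachep->size under CONFIG_TRACING (and 0 otherwise), as the diff below shows. The following is a minimal, self-contained userspace sketch of that pattern (the struct is a simplified stand-in, not the kernel's struct kmem_cache):

/*
 * Minimal userspace sketch (not kernel code): slab_buffer_size() only
 * forwarded cachep->size, so callers can read the field directly.
 */
#include <stddef.h>
#include <stdio.h>

struct kmem_cache {
	size_t size;		/* object size managed by this cache */
};

/* Old pattern: a trivial accessor around cachep->size. */
static size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return cachep->size;
}

int main(void)
{
	struct kmem_cache cache = { .size = 192 };

	/* Before: trace_kmalloc(..., slab_buffer_size(cachep), ...) */
	printf("via wrapper:  %zu\n", slab_buffer_size(&cache));

	/* After:  trace_kmalloc(..., cachep->size, ...) */
	printf("direct field: %zu\n", cache.size);

	return 0;
}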
@@ -113,17 +113,12 @@ void *__kmalloc(size_t size, gfp_t flags);
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_trace(size_t size,
 				    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
 kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return 0;
-}
 #endif
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
...
@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -3850,7 +3842,7 @@ kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
 	trace_kmalloc(_RET_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+		      size, cachep->size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3881,7 +3873,7 @@ void *kmem_cache_alloc_node_trace(size_t size,
 	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
 	trace_kmalloc_node(_RET_IP_, ret,
-			   size, slab_buffer_size(cachep),
+			   size, cachep->size,
 			   flags, nodeid);
 	return ret;
 }
...