Commit 6cb8f913 authored by Christoph Lameter, committed by Linus Torvalds

Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics

Define ZERO_OR_NULL_PTR macro to be able to remove the checks from the
allocators.  Move ZERO_SIZE_PTR related stuff into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the
WARN_ON_ONCE(size == 0) that is still remaining in SLAB.

Make slub return NULL like the other allocators if a too large memory segment
is requested via __kmalloc.
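
Illustration (not part of the patch): a hedged sketch of the caller-visible contract once all three allocators agree. The demo function name is hypothetical, and the size passed to __kmalloc() is just any value larger than the biggest kmalloc cache; the constant-size inline path is avoided on purpose, since for constants an oversized kmalloc() fails at link time instead.

```c
/* Hypothetical caller, sketching the unified semantics. */
static void zero_size_demo(void)
{
	void *p = kmalloc(0, GFP_KERNEL);	/* ZERO_SIZE_PTR, never NULL */
	void *q = __kmalloc(-1UL, GFP_KERNEL);	/* too large: NULL on all allocators */

	WARN_ON(!ZERO_OR_NULL_PTR(p));	/* catches both NULL and ZERO_SIZE_PTR */

	kfree(p);	/* no-op, exactly like kfree(NULL) */
	kfree(q);	/* q == NULL here, also a no-op */
}
```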
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ef2ad80c
@@ -30,6 +30,19 @@
 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
 
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+				(unsigned long)ZERO_SIZE_PTR)
+
 /*
  * struct kmem_cache related prototypes
  */
...
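Note the `<=` in ZERO_OR_NULL_PTR(): it must include ZERO_SIZE_PTR itself, which is exactly the comparison the mm/slub.c kfree() hunk below replaces with this macro. As a sanity check of the comparison logic, here is a minimal userspace model; it presumes, as the kernel does, that no real object ever lives in the first 16 bytes of the address space:

```c
#include <assert.h>
#include <stddef.h>

/* Userspace model of the two macros added above. */
#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

int main(void)
{
	int obj;

	assert(ZERO_OR_NULL_PTR(NULL));			/* 0 <= 16 */
	assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* 16 <= 16: the <= matters */
	assert(!ZERO_OR_NULL_PTR(&obj));		/* real addresses lie above 16 */
	return 0;
}
```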
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -58,6 +62,10 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -88,6 +96,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
...
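These three hunks sit on the constant-size fast path, so a literal kmalloc(0, ...) now folds to ZERO_SIZE_PTR at compile time with no function call. A hedged userspace model of the dispatch (model_kmalloc and its malloc() fallback are stand-ins, not kernel code):

```c
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

/* Models the inline fast path above: for sizes the compiler can prove
 * constant, a zero-byte request never reaches the allocator proper. */
static inline void *model_kmalloc(size_t size)
{
	if (__builtin_constant_p(size)) {
		if (!size)
			return ZERO_SIZE_PTR;	/* folded away by gcc */
	}
	return malloc(size);			/* stand-in for the slab path */
}
```

Non-constant sizes skip this branch and rely on the out-of-line check added to __find_general_cachep() in the mm/slab.c hunks below.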
@@ -159,18 +159,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
...
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	if (!size)
+		return ZERO_SIZE_PTR;
+
 	while (size > csizep->cs_size)
 		csizep++;
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		 * this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
-		BUG_ON(!cachep->slabp_cache);
+		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
@@ -3653,8 +3656,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
@@ -3760,7 +3763,7 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
-	if (unlikely(!objp))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
@@ -4447,7 +4450,7 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(objp == NULL))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
...
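The `return cachep;` in __do_kmalloc_node() forwards the sentinel unchanged: once ZERO_OR_NULL_PTR(cachep) is true, cachep is already the right answer (NULL for an oversized request, ZERO_SIZE_PTR for a zero-sized one). For ksize(), a hedged sketch of the hardened behavior (the demo function is hypothetical, not from the patch):

```c
/* Hypothetical demo: both sentinels now report size 0 instead of
 * feeding a bogus pointer to virt_to_cache(). */
static void ksize_demo(void)
{
	WARN_ON(ksize(NULL) != 0);
	WARN_ON(ksize(ZERO_SIZE_PTR) != 0);	/* previously walked cache metadata */
}
```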
@@ -347,7 +347,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 	BUG_ON(!size);
@@ -424,10 +424,13 @@ static void slob_free(void *block, int size)
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
+	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
-		unsigned int *m;
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
@@ -450,7 +453,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -468,7 +471,7 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
...
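SLOB prepends the request size to each small object so kfree()/ksize() can recover it; short-circuiting zero-sized requests avoids minting a header-only block. A userspace model of that layout (the model_* names, MODEL_ALIGN, and the malloc() backend are stand-ins assumed for illustration):

```c
#include <stdlib.h>

#define ZERO_SIZE_PTR	((void *)16)
#define MODEL_ALIGN	sizeof(unsigned int)	/* stands in for the arch minalign */

/* Models SLOB's small-object path: a size word is stored in front of
 * the pointer handed back to the caller. */
static void *model_slob_kmalloc(size_t size)
{
	unsigned int *m;

	if (!size)
		return ZERO_SIZE_PTR;	/* never allocate just a header */

	m = malloc(size + MODEL_ALIGN);
	if (!m)
		return NULL;
	*m = size;			/* kfree()/ksize() read this back */
	return (char *)m + MODEL_ALIGN;
}
```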
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	int index = kmalloc_index(size);
 
 	if (!index)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	if (index < 0)
+		return NULL;
 
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
...
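get_slab() now encodes three outcomes in one pointer: a real cache, ZERO_SIZE_PTR for a zero-sized request, and NULL for an oversized one (which previously tripped a BUG_ON rather than failing the allocation). The callers above just test ZERO_OR_NULL_PTR() and forward the sentinel. A kernel-style sketch of the mapping, with the kmalloc_index() conventions read off the hunk above rather than quoted from elsewhere:

```c
/* Illustrative only: how get_slab() maps kmalloc_index() results. */
static void *model_result(int index, struct kmem_cache **caches)
{
	if (!index)
		return ZERO_SIZE_PTR;	/* zero-sized request */
	if (index < 0)
		return NULL;		/* oversized: no BUG_ON any more */
	return caches[index];		/* normal kmalloc cache */
}
```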
@@ -76,7 +76,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
...
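With this change, krealloc() returns NULL only on a real allocation failure; a resize to zero frees the buffer and yields ZERO_SIZE_PTR, which remains safe to pass back to kfree(). A hedged caller-side sketch (the helper is hypothetical, not from the patch):

```c
/* Illustrative caller: shrinking a buffer to nothing. */
static void *shrink_demo(void *buf)
{
	buf = krealloc(buf, 0, GFP_KERNEL);	/* frees buf, returns ZERO_SIZE_PTR */
	kfree(buf);				/* no-op, same as kfree(NULL) */
	return NULL;
}
```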