Commit 125b79d7 authored by Linus Torvalds

Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
 "New and noteworthy:

  * More SLAB allocator unification patches from Christoph Lameter and
    others.  This paves the way for slab memcg patches that hopefully
    will land in v3.8.

  * SLAB tracing improvements from Ezequiel Garcia.

  * Kernel tainting upon SLAB corruption from Dave Jones.

  * Miscellaneous SLAB allocator bug fixes and improvements from various
    people."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (43 commits)
  slab: Fix build failure in __kmem_cache_create()
  slub: init_kmem_cache_cpus() and put_cpu_partial() can be static
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  Revert "mm/slab: Fix kmem_cache_alloc_node_trace() declaration"
  mm, slob: fix build breakage in __kmalloc_node_track_caller
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  mm/slab: Fix typo _RET_IP -> _RET_IP_
  mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
  mm, slab: Rename __cache_alloc() -> slab_alloc()
  mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype
  mm, slab: Replace 'caller' type, void* -> unsigned long
  mm, slob: Add support for kmalloc_track_caller()
  mm, slab: Remove silly function slab_buffer_size()
  mm, slob: Use NUMA_NO_NODE instead of -1
  mm, sl[au]b: Taint kernel when we detect a corrupted slab
  slab: Only define slab_error for DEBUG
  slab: fix the DEADLOCK issue on l3 alien lock
  slub: Zero initial memory segment for kmem_cache and kmem_cache_node
  Revert "mm/sl[aou]b: Move sysfs_slab_add to common"
  mm/sl[aou]b: Move kmem_cache refcounting to common code
  ...
parents f1c6872e e2087be3
include/linux/slab.h:

@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
...
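Note: kmalloc_track_caller() allocates like kmalloc() but records the
caller's return address (_RET_IP_ expands at the macro's call site), so
allocations made by small wrappers are credited to the wrapper's own
caller in slab tracing; this hunk simply enables that machinery for
SLOB+tracing too. A minimal sketch of such a wrapper (kstrdup() in the
kernel follows the same pattern; my_strdup is an illustrative name):

#include <linux/slab.h>
#include <linux/string.h>

static char *my_strdup(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	/* Tracing credits my_strdup()'s caller, not my_strdup() itself,
	 * because _RET_IP_ is evaluated right here at this call site. */
	char *buf = kmalloc_track_caller(len, gfp);

	if (buf)
		memcpy(buf, s, len);
	return buf;
}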
include/linux/slab_def.h:

@@ -45,7 +45,6 @@ struct kmem_cache {
 	unsigned int colour_off;	/* colour offset */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
 	void (*ctor)(void *obj);
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size,
-				    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
 #else
 static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return 0;
-}
 #endif
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
@@ -154,7 +147,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
 		cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_trace(size, cachep, flags);
+		ret = kmem_cache_alloc_trace(cachep, flags, size);
 
 		return ret;
 	}
@@ -166,16 +159,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(size_t size,
-					 struct kmem_cache *cachep,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 					 gfp_t flags,
-					 int nodeid);
+					 int nodeid,
+					 size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_trace(size_t size,
-			    struct kmem_cache *cachep,
+kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 			    gfp_t flags,
-			    int nodeid)
+			    int nodeid,
+			    size_t size)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -207,7 +200,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
 		cachep = malloc_sizes[i].cs_cachep;
 
-		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
...
include/linux/slob_def.h:

 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#include <linux/numa.h>
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
 					      gfp_t flags)
 {
-	return kmem_cache_alloc_node(cachep, flags, -1);
+	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	return __kmalloc_node(size, flags, -1);
+	return __kmalloc_node(size, flags, NUMA_NO_NODE);
 }
 
 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
...
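Note: NUMA_NO_NODE is defined as -1 in <linux/numa.h>, so these hunks
are a pure readability change with no behavioral effect. With no node
preference the two calls below behave identically (a sketch for
illustration only):

#include <linux/numa.h>
#include <linux/slab.h>

static void *alloc_example(void)
{
	void *a = kmalloc(64, GFP_KERNEL);	/* allocator picks any node */
	void *b = kmalloc_node(64, GFP_KERNEL, NUMA_NO_NODE);	/* same policy */

	kfree(a);
	return b;
}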
(large diff collapsed in the original view)
mm/slab.h:

@@ -25,9 +25,26 @@ extern enum slab_state slab_state;
 
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+			size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif
mm/slab_common.c:

@@ -22,6 +22,53 @@
 
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
+
+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	struct kmem_cache *s = NULL;
+
+	if (!name || in_interrupt() || size < sizeof(void *) ||
+		size > KMALLOC_MAX_SIZE) {
+		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(s, &slab_caches, list) {
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module. Print a warning.
+		 */
+		res = probe_kernel_address(s->name, tmp);
+		if (res) {
+			pr_err("Slab cache with size %d has lost its name\n",
+			       s->object_size);
+			continue;
+		}
+
+		if (!strcmp(s->name, name)) {
+			pr_err("%s (%s): Cache name already exists.\n",
+			       __func__, name);
+			dump_stack();
+			s = NULL;
+			return -EINVAL;
+		}
+	}
+
+	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
+	return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	return 0;
+}
+#endif
 
 /*
  * kmem_cache_create - Create a cache.
@@ -52,68 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
+	int err = 0;
 
-#ifdef CONFIG_DEBUG_VM
-	if (!name || in_interrupt() || size < sizeof(void *) ||
-	    size > KMALLOC_MAX_SIZE) {
-		printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-			" failed\n", name);
-		goto out;
-	}
-#endif
-
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-#ifdef CONFIG_DEBUG_VM
-	list_for_each_entry(s, &slab_caches, list) {
-		char tmp;
-		int res;
-
-		/*
-		 * This happens when the module gets unloaded and doesn't
-		 * destroy its slab cache and no-one else reuses the vmalloc
-		 * area of the module. Print a warning.
-		 */
-		res = probe_kernel_address(s->name, tmp);
-		if (res) {
-			printk(KERN_ERR
-			       "Slab cache with size %d has lost its name\n",
-			       s->object_size);
-			continue;
-		}
-
-		if (!strcmp(s->name, name)) {
-			printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-				" already exists.\n",
-				name);
-			dump_stack();
-			s = NULL;
-			goto oops;
-		}
-	}
-
-	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
-#endif
-
-	s = __kmem_cache_create(name, size, align, flags, ctor);
-
-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
+	if (!kmem_cache_sanity_check(name, size) == 0)
+		goto out_locked;
+
+	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	if (s)
+		goto out_locked;
+
+	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+	if (s) {
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		err = __kmem_cache_create(s, flags);
+		if (!err) {
+			s->refcount = 1;
+			list_add(&s->list, &slab_caches);
+		} else {
+			kfree(s->name);
+			kmem_cache_free(kmem_cache, s);
+		}
+	} else
+		err = -ENOMEM;
+
+out_locked:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
-	if (!s && (flags & SLAB_PANIC))
-		panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+	if (err) {
+		if (flags & SLAB_PANIC)
+			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
+				name, err);
+		else {
+			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
+				name, err);
+			dump_stack();
+		}
+		return NULL;
+	}
 
 	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+	s->refcount--;
+	if (!s->refcount) {
+		list_del(&s->list);
+
+		if (!__kmem_cache_shutdown(s)) {
+			if (s->flags & SLAB_DESTROY_BY_RCU)
+				rcu_barrier();
+
+			kfree(s->name);
+			kmem_cache_free(kmem_cache, s);
+		} else {
+			list_add(&s->list, &slab_caches);
+			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+				s->name);
+			dump_stack();
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
 	return slab_state >= UP;
...
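Note: with creation and destruction consolidated here, every allocator
gets the same name checking, refcounting, and error handling. Typical
use of this interface from a module, as a sketch (struct foo, foo_cache,
and foo_ctor are illustrative names):

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int refcount;
};

static struct kmem_cache *foo_cache;

/* The constructor runs as slab pages are populated with objects,
 * not on every allocation. */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	f->refcount = 0;
}

static int __init foo_init(void)
{
	/* Returns NULL on failure (or panics if SLAB_PANIC is passed),
	 * matching the common error path above. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/* Drops the refcount taken at create time; the common code frees
	 * the cache only once __kmem_cache_shutdown() reports it empty. */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");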
mm/slob.c:

@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	void *page;
 
 #ifdef CONFIG_NUMA
-	if (node != -1)
+	if (node != NUMA_NO_NODE)
 		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -425,7 +425,8 @@ static void slob_free(void *block, int size)
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+					int node, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
+
 void kfree(const void *block)
 {
 	struct page *sp;
@@ -508,23 +529,15 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	struct kmem_cache *c;
+	size_t align = c->size;
 
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
-
-	if (c) {
-		c->name = name;
-		c->size = size;
-		if (flags & SLAB_DESTROY_BY_RCU) {
-			/* leave room for rcu footer at the end of object */
-			c->size += sizeof(struct slob_rcu);
-		}
-		c->flags = flags;
-		c->ctor = ctor;
-		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-		if (c->align < ARCH_SLAB_MINALIGN)
+	if (flags & SLAB_DESTROY_BY_RCU) {
+		/* leave room for rcu footer at the end of object */
+		c->size += sizeof(struct slob_rcu);
+	}
+	c->flags = flags;
+	/* ignore alignment unless it's forced */
+	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+	if (c->align < ARCH_SLAB_MINALIGN)
@@ -532,20 +545,8 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	if (c->align < align)
 		c->align = align;
 
-		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-		c->refcount = 1;
-	}
-	return c;
-}
-
-void kmem_cache_destroy(struct kmem_cache *c)
-{
-	kmemleak_free(c);
-	if (c->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
-	slob_free(c, sizeof(struct kmem_cache));
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -613,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+	/* No way to check for remaining objects */
+	return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+struct kmem_cache kmem_cache_boot = {
+	.name = "kmem_cache",
+	.size = sizeof(struct kmem_cache),
+	.flags = SLAB_PANIC,
+	.align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+	kmem_cache = &kmem_cache_boot;
 	slab_state = UP;
 }
...
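Note: the statically allocated kmem_cache_boot resolves a bootstrap
cycle: cache descriptors are now allocated from kmem_cache, but the
descriptor for kmem_cache itself cannot be allocated before the
allocator is up. A generic sketch of the pattern (plain C with
illustrative names, not kernel code):

#include <stddef.h>

struct cache {
	const char *name;
	size_t size;
};

/* The descriptor for the cache-of-descriptors is static... */
static struct cache cache_boot = {
	.name = "cache",
	.size = sizeof(struct cache),
};

static struct cache *cache_of_caches;

/* ...and installed once at init, before any dynamic descriptor
 * allocation is possible. */
void cache_init(void)
{
	cache_of_caches = &cache_boot;
}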
(large diff collapsed in the original view)
mm/util.c:

@@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user);
 
+static __always_inline void *__do_krealloc(const void *p, size_t new_size,
+					   gfp_t flags)
+{
+	void *ret;
+	size_t ks = 0;
+
+	if (p)
+		ks = ksize(p);
+
+	if (ks >= new_size)
+		return (void *)p;
+
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret && p)
+		memcpy(ret, p, ks);
+
+	return ret;
+}
+
 /**
  * __krealloc - like krealloc() but don't free @p.
  * @p: object to reallocate memory for.
@@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user);
  */
 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-	void *ret;
-	size_t ks = 0;
-
 	if (unlikely(!new_size))
 		return ZERO_SIZE_PTR;
 
-	if (p)
-		ks = ksize(p);
-
-	if (ks >= new_size)
-		return (void *)p;
-
-	ret = kmalloc_track_caller(new_size, flags);
-	if (ret && p)
-		memcpy(ret, p, ks);
-
-	return ret;
+	return __do_krealloc(p, new_size, flags);
 }
 EXPORT_SYMBOL(__krealloc);
 
@@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return ZERO_SIZE_PTR;
 	}
 
-	ret = __krealloc(p, new_size, flags);
+	ret = __do_krealloc(p, new_size, flags);
 	if (ret && p != ret)
 		kfree(p);
...
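Note: because __do_krealloc() is __always_inline, the _RET_IP_ hidden
inside kmalloc_track_caller() now resolves to the caller of whichever
exported function inlined it, so krealloc() no longer routes through
__krealloc() and attributes its allocations one frame too shallow.
Typical krealloc() use, as a sketch (grow_buffer is an illustrative
name):

#include <linux/slab.h>

static int grow_buffer(int **buf, size_t *cap)
{
	size_t new_cap = *cap ? *cap * 2 : 16;
	int *tmp;

	/* On success krealloc() frees the old block if it had to move;
	 * on failure it returns NULL and leaves the old block intact,
	 * so assigning through a temporary avoids leaking *buf. */
	tmp = krealloc(*buf, new_cap * sizeof(**buf), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	*buf = tmp;
	*cap = new_cap;
	return 0;
}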