Commit 125b79d7 authored by Linus Torvalds

Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
 "New and noteworthy:

  * More SLAB allocator unification patches from Christoph Lameter and
    others.  This paves the way for slab memcg patches that hopefully
    will land in v3.8.

  * SLAB tracing improvements from Ezequiel Garcia.

  * Kernel tainting upon SLAB corruption from Dave Jones.

  * Miscellaneous SLAB allocator bug fixes and improvements from various
    people."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (43 commits)
  slab: Fix build failure in __kmem_cache_create()
  slub: init_kmem_cache_cpus() and put_cpu_partial() can be static
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  Revert "mm/slab: Fix kmem_cache_alloc_node_trace() declaration"
  mm, slob: fix build breakage in __kmalloc_node_track_caller
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  mm/slab: Fix typo _RET_IP -> _RET_IP_
  mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
  mm, slab: Rename __cache_alloc() -> slab_alloc()
  mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype
  mm, slab: Replace 'caller' type, void* -> unsigned long
  mm, slob: Add support for kmalloc_track_caller()
  mm, slab: Remove silly function slab_buffer_size()
  mm, slob: Use NUMA_NO_NODE instead of -1
  mm, sl[au]b: Taint kernel when we detect a corrupted slab
  slab: Only define slab_error for DEBUG
  slab: fix the DEADLOCK issue on l3 alien lock
  slub: Zero initial memory segment for kmem_cache and kmem_cache_node
  Revert "mm/sl[aou]b: Move sysfs_slab_add to common"
  mm/sl[aou]b: Move kmem_cache refcounting to common code
  ...
parents f1c6872e e2087be3
@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+    (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
     __kmalloc_track_caller(size, flags, _RET_IP_)
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+    (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
     __kmalloc_node_track_caller(size, flags, node, \
...
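Note: the hunk above extends caller tracking to SLOB when CONFIG_TRACING is enabled, so kmalloc_track_caller() resolves to __kmalloc_track_caller() for all three allocators and wrappers such as kstrdup() get charged to their real call site. A minimal user-space sketch of the same pattern follows (hypothetical my_alloc()/my_strdup() names, not kernel code); it builds with GCC or Clang because _RET_IP_ is simply __builtin_return_address(0):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* capture the return address at the point where the macro expands */
#define RET_IP ((unsigned long)__builtin_return_address(0))

static void *alloc_track_caller(size_t size, unsigned long caller)
{
	printf("%zu bytes allocated for call site %#lx\n", size, caller);
	return malloc(size);
}

/* analogous to kmalloc_track_caller(): records the caller of the
 * function that expands the macro, not the allocator itself */
#define my_alloc(size) alloc_track_caller((size), RET_IP)

/* a wrapper like kstrdup(): the allocation is attributed to whoever
 * called my_strdup(), which is what the tracing output should show */
static char *my_strdup(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = my_alloc(len);

	return p ? memcpy(p, s, len) : NULL;
}

int main(void)
{
	free(my_strdup("hello"));
	return 0;
}
```

The key point is that the macro expands inside the wrapper, so the recorded address is the wrapper's caller rather than the wrapper itself.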
@@ -45,7 +45,6 @@ struct kmem_cache {
     unsigned int colour_off;        /* colour offset */
     struct kmem_cache *slabp_cache;
     unsigned int slab_size;
-    unsigned int dflags;            /* dynamic flags */
 
     /* constructor func */
     void (*ctor)(void *obj);
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size,
-                                    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
 #else
 static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
     return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-    return 0;
-}
 #endif
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
@@ -154,7 +147,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
         cachep = malloc_sizes[i].cs_cachep;
 
-        ret = kmem_cache_alloc_trace(size, cachep, flags);
+        ret = kmem_cache_alloc_trace(cachep, flags, size);
 
         return ret;
     }
@@ -166,16 +159,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(size_t size,
-                                         struct kmem_cache *cachep,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                                          gfp_t flags,
-                                         int nodeid);
+                                         int nodeid,
+                                         size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_trace(size_t size,
-                            struct kmem_cache *cachep,
+kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                             gfp_t flags,
-                            int nodeid)
+                            int nodeid,
+                            size_t size)
 {
     return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -207,7 +200,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
         cachep = malloc_sizes[i].cs_cachep;
 
-        return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+        return kmem_cache_alloc_node_trace(cachep, flags, node, size);
     }
     return __kmalloc_node(size, flags, node);
 }
...
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#include <linux/numa.h>
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
                                               gfp_t flags)
 {
-    return kmem_cache_alloc_node(cachep, flags, -1);
+    return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-    return __kmalloc_node(size, flags, -1);
+    return __kmalloc_node(size, flags, NUMA_NO_NODE);
 }
 
 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
...
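Note: the slob_def.h hunk replaces the bare -1 node argument with the NUMA_NO_NODE sentinel from <linux/numa.h> (hence the new include); the value is the same, only the intent becomes explicit. A tiny stand-alone illustration of the idea (assumed local-node fallback, not kernel code):

```c
#include <stdio.h>

#define NUMA_NO_NODE (-1)	/* same value the kernel uses in <linux/numa.h> */

/* "no preference" falls back to a default node; any real id is honoured */
static int resolve_node(int node)
{
	return (node == NUMA_NO_NODE) ? 0 : node;
}

int main(void)
{
	printf("%d %d\n", resolve_node(NUMA_NO_NODE), resolve_node(2));
	return 0;
}
```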
@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #endif
 
-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-    return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-    page = compound_head(page);
-    BUG_ON(!PageSlab(page));
-    return page->slab_cache;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
     struct page *page = virt_to_head_page(obj);
@@ -585,9 +570,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-    .nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+    .nodelists = kmem_cache_nodelists,
     .batchcount = 1,
     .limit = BOOT_CPUCACHE_ENTRIES,
     .shared = 1,
@@ -810,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
     *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -818,7 +804,9 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
     printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
            function, cachep->name, msg);
     dump_stack();
+    add_taint(TAINT_BAD_PAGE);
 }
+#endif
 
 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -1601,15 +1589,17 @@ void __init kmem_cache_init(void)
     int order;
     int node;
 
+    kmem_cache = &kmem_cache_boot;
+
     if (num_possible_nodes() == 1)
         use_alien_caches = 0;
 
     for (i = 0; i < NUM_INIT_LISTS; i++) {
         kmem_list3_init(&initkmem_list3[i]);
         if (i < MAX_NUMNODES)
-            cache_cache.nodelists[i] = NULL;
+            kmem_cache->nodelists[i] = NULL;
     }
-    set_up_list3s(&cache_cache, CACHE_CACHE);
+    set_up_list3s(kmem_cache, CACHE_CACHE);
 
     /*
      * Fragmentation resistance on low memory - only use bigger
@@ -1621,9 +1611,9 @@ void __init kmem_cache_init(void)
     /* Bootstrap is tricky, because several objects are allocated
      * from caches that do not exist yet:
-     * 1) initialize the cache_cache cache: it contains the struct
-     *    kmem_cache structures of all caches, except cache_cache itself:
-     *    cache_cache is statically allocated.
+     * 1) initialize the kmem_cache cache: it contains the struct
+     *    kmem_cache structures of all caches, except kmem_cache itself:
+     *    kmem_cache is statically allocated.
      *    Initially an __init data area is used for the head array and the
      *    kmem_list3 structures, it's replaced with a kmalloc allocated
      *    array at the end of the bootstrap.
@@ -1632,43 +1622,43 @@ void __init kmem_cache_init(void)
      *    An __init data area is used for the head array.
      * 3) Create the remaining kmalloc caches, with minimally sized
      *    head arrays.
-     * 4) Replace the __init data head arrays for cache_cache and the first
+     * 4) Replace the __init data head arrays for kmem_cache and the first
      *    kmalloc cache with kmalloc allocated arrays.
-     * 5) Replace the __init data for kmem_list3 for cache_cache and
+     * 5) Replace the __init data for kmem_list3 for kmem_cache and
      *    the other cache's with kmalloc allocated memory.
      * 6) Resize the head arrays of the kmalloc caches to their final sizes.
      */
 
     node = numa_mem_id();
 
-    /* 1) create the cache_cache */
+    /* 1) create the kmem_cache */
     INIT_LIST_HEAD(&slab_caches);
-    list_add(&cache_cache.list, &slab_caches);
-    cache_cache.colour_off = cache_line_size();
-    cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-    cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+    list_add(&kmem_cache->list, &slab_caches);
+    kmem_cache->colour_off = cache_line_size();
+    kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+    kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
     /*
      * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
      */
-    cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+    kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
                   nr_node_ids * sizeof(struct kmem_list3 *);
-    cache_cache.object_size = cache_cache.size;
-    cache_cache.size = ALIGN(cache_cache.size,
+    kmem_cache->object_size = kmem_cache->size;
+    kmem_cache->size = ALIGN(kmem_cache->object_size,
                     cache_line_size());
-    cache_cache.reciprocal_buffer_size =
-        reciprocal_value(cache_cache.size);
+    kmem_cache->reciprocal_buffer_size =
+        reciprocal_value(kmem_cache->size);
 
     for (order = 0; order < MAX_ORDER; order++) {
-        cache_estimate(order, cache_cache.size,
-            cache_line_size(), 0, &left_over, &cache_cache.num);
-        if (cache_cache.num)
+        cache_estimate(order, kmem_cache->size,
+            cache_line_size(), 0, &left_over, &kmem_cache->num);
+        if (kmem_cache->num)
             break;
     }
-    BUG_ON(!cache_cache.num);
-    cache_cache.gfporder = order;
-    cache_cache.colour = left_over / cache_cache.colour_off;
-    cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+    BUG_ON(!kmem_cache->num);
+    kmem_cache->gfporder = order;
+    kmem_cache->colour = left_over / kmem_cache->colour_off;
+    kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
                       sizeof(struct slab), cache_line_size());
 
     /* 2+3) create the kmalloc caches */
@@ -1681,19 +1671,22 @@ void __init kmem_cache_init(void)
      * bug.
      */
 
-    sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-                    sizes[INDEX_AC].cs_size,
-                    ARCH_KMALLOC_MINALIGN,
-                    ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                    NULL);
+    sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+    sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+    sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+    sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+    sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+    __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+    list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 
     if (INDEX_AC != INDEX_L3) {
-        sizes[INDEX_L3].cs_cachep =
-            __kmem_cache_create(names[INDEX_L3].name,
-                sizes[INDEX_L3].cs_size,
-                ARCH_KMALLOC_MINALIGN,
-                ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                NULL);
+        sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+        sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+        sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+        sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+        sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+        __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+        list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
     }
 
     slab_early_init = 0;
@@ -1707,20 +1700,23 @@ void __init kmem_cache_init(void)
          * allow tighter packing of the smaller caches.
          */
         if (!sizes->cs_cachep) {
-            sizes->cs_cachep = __kmem_cache_create(names->name,
-                    sizes->cs_size,
-                    ARCH_KMALLOC_MINALIGN,
-                    ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                    NULL);
+            sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+            sizes->cs_cachep->name = names->name;
+            sizes->cs_cachep->size = sizes->cs_size;
+            sizes->cs_cachep->object_size = sizes->cs_size;
+            sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+            __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+            list_add(&sizes->cs_cachep->list, &slab_caches);
         }
 #ifdef CONFIG_ZONE_DMA
-        sizes->cs_dmacachep = __kmem_cache_create(
-                    names->name_dma,
-                    sizes->cs_size,
-                    ARCH_KMALLOC_MINALIGN,
-                    ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-                        SLAB_PANIC,
-                    NULL);
+        sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+        sizes->cs_dmacachep->name = names->name_dma;
+        sizes->cs_dmacachep->size = sizes->cs_size;
+        sizes->cs_dmacachep->object_size = sizes->cs_size;
+        sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+        __kmem_cache_create(sizes->cs_dmacachep,
+                       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+        list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
         sizes++;
         names++;
@@ -1731,15 +1727,15 @@ void __init kmem_cache_init(void)
 
         ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-        BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-        memcpy(ptr, cpu_cache_get(&cache_cache),
+        BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+        memcpy(ptr, cpu_cache_get(kmem_cache),
                sizeof(struct arraycache_init));
         /*
          * Do not assume that spinlocks can be initialized via memcpy:
          */
         spin_lock_init(&ptr->lock);
 
-        cache_cache.array[smp_processor_id()] = ptr;
+        kmem_cache->array[smp_processor_id()] = ptr;
 
         ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
@@ -1760,7 +1756,7 @@ void __init kmem_cache_init(void)
         int nid;
 
         for_each_online_node(nid) {
-            init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+            init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
             init_list(malloc_sizes[INDEX_AC].cs_cachep,
                       &initkmem_list3[SIZE_AC + nid], nid);
@@ -1781,9 +1777,6 @@ void __init kmem_cache_init_late(void)
 
     slab_state = UP;
 
-    /* Annotate slab for lockdep -- annotate the malloc caches */
-    init_lock_keys();
-
     /* 6) resize the head arrays to their final sizes */
     mutex_lock(&slab_mutex);
     list_for_each_entry(cachep, &slab_caches, list)
@@ -1791,6 +1784,9 @@ void __init kmem_cache_init_late(void)
             BUG();
     mutex_unlock(&slab_mutex);
 
+    /* Annotate slab for lockdep -- annotate the malloc caches */
+    init_lock_keys();
+
     /* Done! */
     slab_state = FULL;
@@ -2209,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
     }
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-    int i;
-    struct kmem_list3 *l3;
-
-    for_each_online_cpu(i)
-        kfree(cachep->array[i]);
-
-    /* NUMA: free the list3 structures */
-    for_each_online_node(i) {
-        l3 = cachep->nodelists[i];
-        if (l3) {
-            kfree(l3->shared);
-            free_alien_cache(l3->alien);
-            kfree(l3);
-        }
-    }
-    kmem_cache_free(&cache_cache, cachep);
-}
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2366,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2381,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-    unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
     size_t left_over, slab_size, ralign;
-    struct kmem_cache *cachep = NULL;
     gfp_t gfp;
+    int err;
+    size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2459,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         ralign = ARCH_SLAB_MINALIGN;
     }
     /* 3) caller mandated alignment */
-    if (ralign < align) {
-        ralign = align;
+    if (ralign < cachep->align) {
+        ralign = cachep->align;
     }
     /* disable debug if necessary */
     if (ralign > __alignof__(unsigned long long))
@@ -2468,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
     /*
      * 4) Store it.
      */
-    align = ralign;
+    cachep->align = ralign;
 
     if (slab_is_available())
         gfp = GFP_KERNEL;
     else
         gfp = GFP_NOWAIT;
 
-    /* Get cache's description obj. */
-    cachep = kmem_cache_zalloc(&cache_cache, gfp);
-    if (!cachep)
-        return NULL;
-
     cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-    cachep->object_size = size;
-    cachep->align = align;
 #if DEBUG
 
     /*
@@ -2506,8 +2471,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
     }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
     if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-        && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-        cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+        && cachep->object_size > cache_line_size()
+        && ALIGN(size, cachep->align) < PAGE_SIZE) {
+        cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
         size = PAGE_SIZE;
     }
 #endif
@@ -2527,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
      */
         flags |= CFLGS_OFF_SLAB;
 
-    size = ALIGN(size, align);
+    size = ALIGN(size, cachep->align);
 
-    left_over = calculate_slab_order(cachep, size, align, flags);
+    left_over = calculate_slab_order(cachep, size, cachep->align, flags);
+
+    if (!cachep->num)
+        return -E2BIG;
 
-    if (!cachep->num) {
-        printk(KERN_ERR
-               "kmem_cache_create: couldn't create cache %s.\n", name);
-        kmem_cache_free(&cache_cache, cachep);
-        return NULL;
-    }
     slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-              + sizeof(struct slab), align);
+              + sizeof(struct slab), cachep->align);
 
     /*
      * If the slab has been placed off-slab, and we have enough space then
@@ -2566,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
     cachep->colour_off = cache_line_size();
     /* Offset must be a multiple of the alignment. */
-    if (cachep->colour_off < align)
-        cachep->colour_off = align;
+    if (cachep->colour_off < cachep->align)
+        cachep->colour_off = cachep->align;
     cachep->colour = left_over / cachep->colour_off;
     cachep->slab_size = slab_size;
     cachep->flags = flags;
@@ -2588,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
      */
         BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
     }
-    cachep->ctor = ctor;
-    cachep->name = name;
 
-    if (setup_cpu_cache(cachep, gfp)) {
-        __kmem_cache_destroy(cachep);
-        return NULL;
+    err = setup_cpu_cache(cachep, gfp);
+    if (err) {
+        __kmem_cache_shutdown(cachep);
+        return err;
     }
 
     if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2606,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         slab_set_debugobj_lock_classes(cachep);
     }
 
-    /* cache setup completed, link it into the list */
-    list_add(&cachep->list, &slab_caches);
-    return cachep;
+    return 0;
 }
 
 #if DEBUG
@@ -2767,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-    BUG_ON(!cachep || in_interrupt());
+    int i;
+    struct kmem_list3 *l3;
+    int rc = __cache_shrink(cachep);
 
-    /* Find the cache in the chain of caches. */
-    get_online_cpus();
-    mutex_lock(&slab_mutex);
-    /*
-     * the chain is never empty, cache_cache is never destroyed
-     */
-    list_del(&cachep->list);
-    if (__cache_shrink(cachep)) {
-        slab_error(cachep, "Can't free all objects");
-        list_add(&cachep->list, &slab_caches);
-        mutex_unlock(&slab_mutex);
-        put_online_cpus();
-        return;
-    }
+    if (rc)
+        return rc;
 
-    if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-        rcu_barrier();
+    for_each_online_cpu(i)
+        kfree(cachep->array[i]);
 
-    __kmem_cache_destroy(cachep);
-    mutex_unlock(&slab_mutex);
-    put_online_cpus();
+    /* NUMA: free the list3 structures */
+    for_each_online_node(i) {
+        l3 = cachep->nodelists[i];
+        if (l3) {
+            kfree(l3->shared);
+            free_alien_cache(l3->alien);
+            kfree(l3);
+        }
+    }
+    return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
@@ -3098,7 +3038,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 }
 
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-                   void *caller)
+                   unsigned long caller)
 {
     struct page *page;
     unsigned int objnr;
@@ -3118,7 +3058,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
         *dbg_redzone2(cachep, objp) = RED_INACTIVE;
     }
     if (cachep->flags & SLAB_STORE_USER)
-        *dbg_userword(cachep, objp) = caller;
+        *dbg_userword(cachep, objp) = (void *)caller;
 
     objnr = obj_to_index(cachep, slabp, objp);
@@ -3131,7 +3071,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
     if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
         if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-            store_stackinfo(cachep, objp, (unsigned long)caller);
+            store_stackinfo(cachep, objp, caller);
             kernel_map_pages(virt_to_page(objp),
                      cachep->size / PAGE_SIZE, 0);
         } else {
@@ -3285,7 +3225,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-                gfp_t flags, void *objp, void *caller)
+                gfp_t flags, void *objp, unsigned long caller)
 {
     if (!objp)
         return objp;
@@ -3302,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
         poison_obj(cachep, objp, POISON_INUSE);
     }
     if (cachep->flags & SLAB_STORE_USER)
-        *dbg_userword(cachep, objp) = caller;
+        *dbg_userword(cachep, objp) = (void *)caller;
 
     if (cachep->flags & SLAB_RED_ZONE) {
         if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3343,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-    if (cachep == &cache_cache)
+    if (cachep == kmem_cache)
         return false;
 
     return should_failslab(cachep->object_size, flags, cachep->flags);
@@ -3576,8 +3516,8 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
  * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-           void *caller)
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+           unsigned long caller)
 {
     unsigned long save_flags;
     void *ptr;
@@ -3663,7 +3603,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
     unsigned long save_flags;
     void *objp;
@@ -3799,7 +3739,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  * be in this state _before_ it is released.  Called with disabled ints.
  */
 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-                void *caller)
+                unsigned long caller)
 {
     struct array_cache *ac = cpu_cache_get(cachep);
@@ -3839,7 +3779,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-    void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+    void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
     trace_kmem_cache_alloc(_RET_IP_, ret,
                    cachep->object_size, cachep->size, flags);
@@ -3850,14 +3790,14 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
 void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
     void *ret;
 
-    ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+    ret = slab_alloc(cachep, flags, _RET_IP_);
 
     trace_kmalloc(_RET_IP_, ret,
-              size, slab_buffer_size(cachep), flags);
+              size, cachep->size, flags);
     return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3866,8 +3806,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-    void *ret = __cache_alloc_node(cachep, flags, nodeid,
-                       __builtin_return_address(0));
+    void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
     trace_kmem_cache_alloc_node(_RET_IP_, ret,
                     cachep->object_size, cachep->size,
@@ -3878,17 +3817,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(size_t size,
-                  struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                   gfp_t flags,
-                  int nodeid)
+                  int nodeid,
+                  size_t size)
 {
     void *ret;
 
-    ret = __cache_alloc_node(cachep, flags, nodeid,
-                  __builtin_return_address(0));
+    ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+
     trace_kmalloc_node(_RET_IP_, ret,
-               size, slab_buffer_size(cachep),
+               size, cachep->size,
                flags, nodeid);
     return ret;
 }
@@ -3896,34 +3835,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
     struct kmem_cache *cachep;
 
     cachep = kmem_find_general_cachep(size, flags);
     if (unlikely(ZERO_OR_NULL_PTR(cachep)))
         return cachep;
-    return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+    return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-    return __do_kmalloc_node(size, flags, node,
-            __builtin_return_address(0));
+    return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
         int node, unsigned long caller)
 {
-    return __do_kmalloc_node(size, flags, node, (void *)caller);
+    return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-    return __do_kmalloc_node(size, flags, node, NULL);
+    return __do_kmalloc_node(size, flags, node, 0);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3936,7 +3874,7 @@ EXPORT_SYMBOL(__kmalloc_node);
  * @caller: function caller for debug tracking of the caller
  */
 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-                      void *caller)
+                      unsigned long caller)
 {
     struct kmem_cache *cachep;
     void *ret;
@@ -3949,9 +3887,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
     cachep = __find_general_cachep(size, flags);
     if (unlikely(ZERO_OR_NULL_PTR(cachep)))
         return cachep;
-    ret = __cache_alloc(cachep, flags, caller);
+    ret = slab_alloc(cachep, flags, caller);
 
-    trace_kmalloc((unsigned long) caller, ret,
+    trace_kmalloc(caller, ret,
               size, cachep->size, flags);
 
     return ret;
@@ -3961,20 +3899,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
-    return __do_kmalloc(size, flags, __builtin_return_address(0));
+    return __do_kmalloc(size, flags, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-    return __do_kmalloc(size, flags, (void *)caller);
+    return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-    return __do_kmalloc(size, flags, NULL);
+    return __do_kmalloc(size, flags, 0);
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
@@ -3995,7 +3933,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
     debug_check_no_locks_freed(objp, cachep->object_size);
     if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
         debug_check_no_obj_freed(objp, cachep->object_size);
-    __cache_free(cachep, objp, __builtin_return_address(0));
+    __cache_free(cachep, objp, _RET_IP_);
     local_irq_restore(flags);
 
     trace_kmem_cache_free(_RET_IP_, objp);
@@ -4026,7 +3964,7 @@ void kfree(const void *objp)
     debug_check_no_locks_freed(objp, c->object_size);
 
     debug_check_no_obj_freed(objp, c->object_size);
-    __cache_free(c, (void *)objp, __builtin_return_address(0));
+    __cache_free(c, (void *)objp, _RET_IP_);
     local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
...
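Note: after the mm/slab.c changes above, the allocator no longer allocates, names or lists its own struct kmem_cache; __kmem_cache_create() only fills in the cache geometry for a descriptor handed to it and returns 0 or a negative errno, and __kmem_cache_shutdown() tears down the per-cpu and per-node state. A toy user-space sketch of that contract (hypothetical toy_cache type, not the kernel structures):

```c
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>

/* stand-in for struct kmem_cache: the common layer owns this object */
struct toy_cache {
	const char *name;
	size_t size;
	size_t align;
	void *percpu_state;
};

/* allocator hook: configure an already-allocated descriptor, 0 or -errno */
static int toy_cache_create(struct toy_cache *c, unsigned long flags)
{
	(void)flags;
	if (c->size == 0)
		return -E2BIG;	/* mirrors the new "if (!cachep->num) return -E2BIG;" */
	c->percpu_state = calloc(1, 64);
	return c->percpu_state ? 0 : -ENOMEM;
}

/* allocator hook: release what create set up; the descriptor itself
 * is freed by the common layer, not here */
static int toy_cache_shutdown(struct toy_cache *c)
{
	free(c->percpu_state);
	c->percpu_state = NULL;
	return 0;
}

int main(void)
{
	struct toy_cache c = { .name = "toy", .size = 32, .align = 8 };

	if (toy_cache_create(&c, 0) == 0)
		toy_cache_shutdown(&c);
	return 0;
}
```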
@@ -25,9 +25,26 @@ extern enum slab_state slab_state;
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
     size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+    size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif
@@ -22,6 +22,53 @@
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
+
+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
+{
+    struct kmem_cache *s = NULL;
+
+    if (!name || in_interrupt() || size < sizeof(void *) ||
+        size > KMALLOC_MAX_SIZE) {
+        pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+        return -EINVAL;
+    }
+
+    list_for_each_entry(s, &slab_caches, list) {
+        char tmp;
+        int res;
+
+        /*
+         * This happens when the module gets unloaded and doesn't
+         * destroy its slab cache and no-one else reuses the vmalloc
+         * area of the module.  Print a warning.
+         */
+        res = probe_kernel_address(s->name, tmp);
+        if (res) {
+            pr_err("Slab cache with size %d has lost its name\n",
+                   s->object_size);
+            continue;
+        }
+
+        if (!strcmp(s->name, name)) {
+            pr_err("%s (%s): Cache name already exists.\n",
+                   __func__, name);
+            dump_stack();
+            s = NULL;
+            return -EINVAL;
+        }
+    }
+
+    WARN_ON(strchr(name, ' '));    /* It confuses parsers */
+    return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+    return 0;
+}
+#endif
 
 /*
  * kmem_cache_create - Create a cache.
@@ -52,68 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
         unsigned long flags, void (*ctor)(void *))
 {
     struct kmem_cache *s = NULL;
+    int err = 0;
 
-#ifdef CONFIG_DEBUG_VM
-    if (!name || in_interrupt() || size < sizeof(void *) ||
-        size > KMALLOC_MAX_SIZE) {
-        printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-            " failed\n", name);
-        goto out;
-    }
-#endif
-
     get_online_cpus();
     mutex_lock(&slab_mutex);
-#ifdef CONFIG_DEBUG_VM
-    list_for_each_entry(s, &slab_caches, list) {
-        char tmp;
-        int res;
 
-        /*
-         * This happens when the module gets unloaded and doesn't
-         * destroy its slab cache and no-one else reuses the vmalloc
-         * area of the module. Print a warning.
-         */
-        res = probe_kernel_address(s->name, tmp);
-        if (res) {
-            printk(KERN_ERR
-                   "Slab cache with size %d has lost its name\n",
-                   s->object_size);
-            continue;
-        }
+    if (!kmem_cache_sanity_check(name, size) == 0)
+        goto out_locked;
 
-        if (!strcmp(s->name, name)) {
-            printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-                   " already exists.\n",
-                   name);
-            dump_stack();
-            s = NULL;
-            goto oops;
+    s = __kmem_cache_alias(name, size, align, flags, ctor);
+    if (s)
+        goto out_locked;
+
+    s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+    if (s) {
+        s->object_size = s->size = size;
+        s->align = align;
+        s->ctor = ctor;
+        s->name = kstrdup(name, GFP_KERNEL);
+        if (!s->name) {
+            kmem_cache_free(kmem_cache, s);
+            err = -ENOMEM;
+            goto out_locked;
         }
-    }
 
-    WARN_ON(strchr(name, ' '));    /* It confuses parsers */
-#endif
-    s = __kmem_cache_create(name, size, align, flags, ctor);
+        err = __kmem_cache_create(s, flags);
+        if (!err) {
+            s->refcount = 1;
+            list_add(&s->list, &slab_caches);
+        } else {
+            kfree(s->name);
+            kmem_cache_free(kmem_cache, s);
+        }
+    } else
+        err = -ENOMEM;
 
-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
+out_locked:
     mutex_unlock(&slab_mutex);
     put_online_cpus();
 
-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
-    if (!s && (flags & SLAB_PANIC))
-        panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+    if (err) {
+
+        if (flags & SLAB_PANIC)
+            panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
+                name, err);
+        else {
+            printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
+                name, err);
+            dump_stack();
+        }
+
+        return NULL;
+    }
 
     return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+    get_online_cpus();
+    mutex_lock(&slab_mutex);
+    s->refcount--;
+    if (!s->refcount) {
+        list_del(&s->list);
+
+        if (!__kmem_cache_shutdown(s)) {
+            if (s->flags & SLAB_DESTROY_BY_RCU)
+                rcu_barrier();
+
+            kfree(s->name);
+            kmem_cache_free(kmem_cache, s);
+        } else {
+            list_add(&s->list, &slab_caches);
+            printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+                s->name);
+            dump_stack();
+        }
+    }
+    mutex_unlock(&slab_mutex);
+    put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
     return slab_state >= UP;
...
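Note: the consumer-facing API is untouched by the move into mm/slab_common.c; a module still calls kmem_cache_create()/kmem_cache_destroy() exactly as before, and the common code now does the sanity checks, descriptor allocation, refcounting and list handling for every allocator. A minimal module-side sketch (hypothetical foo names):

```c
#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int value;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/* the common kmem_cache_create() now performs the sanity checks,
	 * allocates the descriptor and links it into slab_caches */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* the common kmem_cache_destroy() drops the refcount and calls the
	 * allocator's __kmem_cache_shutdown() when it reaches zero */
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
```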
@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
     void *page;
 
 #ifdef CONFIG_NUMA
-    if (node != -1)
+    if (node != NUMA_NO_NODE)
         page = alloc_pages_exact_node(node, gfp, order);
     else
 #endif
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
          * If there's a node specification, search for a partial
          * page with a matching node id in the freelist.
          */
-        if (node != -1 && page_to_nid(sp) != node)
+        if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
             continue;
 #endif
         /* Enough room on this page? */
@@ -425,7 +425,8 @@ static void slob_free(void *block, int size)
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
     unsigned int *m;
     int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
         *m = size;
         ret = (void *)m + align;
 
-        trace_kmalloc_node(_RET_IP_, ret,
+        trace_kmalloc_node(caller, ret,
                    size, size + align, gfp, node);
     } else {
         unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
             page->private = size;
         }
 
-        trace_kmalloc_node(_RET_IP_, ret,
+        trace_kmalloc_node(caller, ret,
                    size, PAGE_SIZE << order, gfp, node);
     }
 
     kmemleak_alloc(ret, size, 1, gfp);
     return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+    return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+    return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+                    int node, unsigned long caller)
+{
+    return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
+
 void kfree(const void *block)
 {
     struct page *sp;
@@ -508,44 +529,24 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-    size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-    struct kmem_cache *c;
+    size_t align = c->size;
 
-    c = slob_alloc(sizeof(struct kmem_cache),
-        GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
-
-    if (c) {
-        c->name = name;
-        c->size = size;
-        if (flags & SLAB_DESTROY_BY_RCU) {
-            /* leave room for rcu footer at the end of object */
-            c->size += sizeof(struct slob_rcu);
-        }
-        c->flags = flags;
-        c->ctor = ctor;
-        /* ignore alignment unless it's forced */
-        c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-        if (c->align < ARCH_SLAB_MINALIGN)
-            c->align = ARCH_SLAB_MINALIGN;
-        if (c->align < align)
-            c->align = align;
-
-        kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-        c->refcount = 1;
+    if (flags & SLAB_DESTROY_BY_RCU) {
+        /* leave room for rcu footer at the end of object */
+        c->size += sizeof(struct slob_rcu);
     }
-    return c;
-}
+    c->flags = flags;
+    /* ignore alignment unless it's forced */
+    c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+    if (c->align < ARCH_SLAB_MINALIGN)
+        c->align = ARCH_SLAB_MINALIGN;
+    if (c->align < align)
+        c->align = align;
 
-void kmem_cache_destroy(struct kmem_cache *c)
-{
-    kmemleak_free(c);
-    if (c->flags & SLAB_DESTROY_BY_RCU)
-        rcu_barrier();
-    slob_free(c, sizeof(struct kmem_cache));
+    return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -613,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+    /* No way to check for remaining objects */
+    return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
     return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+struct kmem_cache kmem_cache_boot = {
+    .name = "kmem_cache",
+    .size = sizeof(struct kmem_cache),
+    .flags = SLAB_PANIC,
+    .align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+    kmem_cache = &kmem_cache_boot;
     slab_state = UP;
 }
...
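Note: SLOB now mirrors the SLAB bootstrap above with a statically allocated kmem_cache_boot that describes struct kmem_cache itself, so the "cache of caches" exists before any dynamic allocation works. A compact stand-alone sketch of the bootstrap idea (toy types, not the kernel ones):

```c
#include <stdio.h>

struct toy_cache {
	const char *name;
	unsigned long size;
};

/* the descriptor for descriptors must be static: it is needed
 * before the allocator that would normally allocate it is up */
static struct toy_cache toy_cache_boot = {
	.name = "toy_cache",
	.size = sizeof(struct toy_cache),
};

static struct toy_cache *toy_cache;	/* equivalent of the global kmem_cache */

static void toy_cache_init(void)
{
	toy_cache = &toy_cache_boot;
}

int main(void)
{
	toy_cache_init();
	printf("%s: %lu bytes\n", toy_cache->name, toy_cache->size);
	return 0;
}
```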
...@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *); ...@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; } { return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s) static inline void sysfs_slab_remove(struct kmem_cache *s) { }
{
kfree(s->name);
kfree(s);
}
#endif #endif
...@@ -568,6 +564,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...) ...@@ -568,6 +564,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
printk(KERN_ERR "----------------------------------------" printk(KERN_ERR "----------------------------------------"
"-------------------------------------\n\n"); "-------------------------------------\n\n");
add_taint(TAINT_BAD_PAGE);
} }
static void slab_fix(struct kmem_cache *s, char *fmt, ...) static void slab_fix(struct kmem_cache *s, char *fmt, ...)
...@@ -624,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page, ...@@ -624,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
print_trailer(s, page, object); print_trailer(s, page, object);
} }
static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
{ {
va_list args; va_list args;
char buf[100]; char buf[100];
...@@ -1069,13 +1067,13 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa ...@@ -1069,13 +1067,13 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
return 0; return 0;
} }
static noinline int free_debug_processing(struct kmem_cache *s, static noinline struct kmem_cache_node *free_debug_processing(
struct page *page, void *object, unsigned long addr) struct kmem_cache *s, struct page *page, void *object,
unsigned long addr, unsigned long *flags)
{ {
unsigned long flags; struct kmem_cache_node *n = get_node(s, page_to_nid(page));
int rc = 0;
local_irq_save(flags); spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page); slab_lock(page);
if (!check_slab(s, page)) if (!check_slab(s, page))
...@@ -1113,15 +1111,19 @@ static noinline int free_debug_processing(struct kmem_cache *s, ...@@ -1113,15 +1111,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
set_track(s, object, TRACK_FREE, addr); set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0); trace(s, page, object, 0);
init_object(s, object, SLUB_RED_INACTIVE); init_object(s, object, SLUB_RED_INACTIVE);
rc = 1;
out: out:
slab_unlock(page); slab_unlock(page);
local_irq_restore(flags); /*
return rc; * Keep node_lock to preserve integrity
* until the object is actually freed
*/
return n;
fail: fail:
slab_unlock(page);
spin_unlock_irqrestore(&n->list_lock, *flags);
slab_fix(s, "Object at 0x%p not freed", object); slab_fix(s, "Object at 0x%p not freed", object);
goto out; return NULL;
} }
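Hedged sketch of the new calling contract: free_debug_processing() now hands back the kmem_cache_node with n->list_lock held (or NULL on failure), and the caller is expected to drop the lock once the object has actually been freed, as the comment above indicates. The helper name example_debug_free and the surrounding simplifications are illustrative, not part of the patch; the real consumer is __slab_free() further down.

/*
 * Illustrative caller of the reworked debug free path (simplified;
 * not the full __slab_free() body).
 */
static void example_debug_free(struct kmem_cache *s, struct page *page,
			       void *object, unsigned long addr)
{
	struct kmem_cache_node *n = NULL;
	unsigned long flags;

	if (kmem_cache_debug(s)) {
		n = free_debug_processing(s, page, object, addr, &flags);
		if (!n)
			return;	/* checks failed; list_lock already dropped */
	}

	/* ... perform the actual free of 'object' here ... */

	if (n)
		spin_unlock_irqrestore(&n->list_lock, flags);
}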
static int __init setup_slub_debug(char *str) static int __init setup_slub_debug(char *str)
...@@ -1214,8 +1216,9 @@ static inline void setup_object_debug(struct kmem_cache *s, ...@@ -1214,8 +1216,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
static inline int alloc_debug_processing(struct kmem_cache *s, static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; } struct page *page, void *object, unsigned long addr) { return 0; }
static inline int free_debug_processing(struct kmem_cache *s, static inline struct kmem_cache_node *free_debug_processing(
struct page *page, void *object, unsigned long addr) { return 0; } struct kmem_cache *s, struct page *page, void *object,
unsigned long addr, unsigned long *flags) { return NULL; }
static inline int slab_pad_check(struct kmem_cache *s, struct page *page) static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; } { return 1; }
...@@ -1714,7 +1717,7 @@ static inline void note_cmpxchg_failure(const char *n, ...@@ -1714,7 +1717,7 @@ static inline void note_cmpxchg_failure(const char *n,
stat(s, CMPXCHG_DOUBLE_CPU_FAIL); stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
} }
void init_kmem_cache_cpus(struct kmem_cache *s) static void init_kmem_cache_cpus(struct kmem_cache *s)
{ {
int cpu; int cpu;
...@@ -1939,7 +1942,7 @@ static void unfreeze_partials(struct kmem_cache *s) ...@@ -1939,7 +1942,7 @@ static void unfreeze_partials(struct kmem_cache *s)
* If we did not find a slot then simply move all the partials to the * If we did not find a slot then simply move all the partials to the
* per node partial list. * per node partial list.
*/ */
int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{ {
struct page *oldpage; struct page *oldpage;
int pages; int pages;
...@@ -1962,6 +1965,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) ...@@ -1962,6 +1965,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
local_irq_save(flags); local_irq_save(flags);
unfreeze_partials(s); unfreeze_partials(s);
local_irq_restore(flags); local_irq_restore(flags);
oldpage = NULL;
pobjects = 0; pobjects = 0;
pages = 0; pages = 0;
stat(s, CPU_PARTIAL_DRAIN); stat(s, CPU_PARTIAL_DRAIN);
...@@ -2310,7 +2314,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -2310,7 +2314,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
* *
* Otherwise we can simply pick the next object from the lockless free list. * Otherwise we can simply pick the next object from the lockless free list.
*/ */
static __always_inline void *slab_alloc(struct kmem_cache *s, static __always_inline void *slab_alloc_node(struct kmem_cache *s,
gfp_t gfpflags, int node, unsigned long addr) gfp_t gfpflags, int node, unsigned long addr)
{ {
void **object; void **object;
...@@ -2380,9 +2384,15 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, ...@@ -2380,9 +2384,15 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
return object; return object;
} }
static __always_inline void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, unsigned long addr)
{
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{ {
void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
...@@ -2393,7 +2403,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); ...@@ -2393,7 +2403,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{ {
void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
return ret; return ret;
} }
...@@ -2411,7 +2421,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); ...@@ -2411,7 +2421,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{ {
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
trace_kmem_cache_alloc_node(_RET_IP_, ret, trace_kmem_cache_alloc_node(_RET_IP_, ret,
s->object_size, s->size, gfpflags, node); s->object_size, s->size, gfpflags, node);
...@@ -2425,7 +2435,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, ...@@ -2425,7 +2435,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags, gfp_t gfpflags,
int node, size_t size) int node, size_t size)
{ {
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
trace_kmalloc_node(_RET_IP_, ret, trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node); size, s->size, gfpflags, node);
...@@ -2457,7 +2467,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page, ...@@ -2457,7 +2467,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
stat(s, FREE_SLOWPATH); stat(s, FREE_SLOWPATH);
if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) if (kmem_cache_debug(s) &&
!(n = free_debug_processing(s, page, x, addr, &flags)))
return; return;
do { do {
...@@ -2612,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x) ...@@ -2612,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
page = virt_to_head_page(x); page = virt_to_head_page(x);
if (kmem_cache_debug(s) && page->slab != s) {
pr_err("kmem_cache_free: Wrong slab cache. %s but object"
" is from %s\n", page->slab->name, s->name);
WARN_ON_ONCE(1);
return;
}
slab_free(s, page, x, _RET_IP_); slab_free(s, page, x, _RET_IP_);
trace_kmem_cache_free(_RET_IP_, x); trace_kmem_cache_free(_RET_IP_, x);
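The new guard in kmem_cache_free() traps objects handed back to the wrong cache. A hypothetical misuse it is meant to catch is sketched below; the cache names, sizes and the helper example_wrong_cache_free() are illustrative only.

/* Hypothetical misuse trapped by the new wrong-cache check. */
static void example_wrong_cache_free(void)
{
	struct kmem_cache *cache_a = kmem_cache_create("cache_a", 64, 0, 0, NULL);
	struct kmem_cache *cache_b = kmem_cache_create("cache_b", 128, 0, 0, NULL);
	void *obj = kmem_cache_alloc(cache_a, GFP_KERNEL);

	/*
	 * The object belongs to cache_a. With debugging enabled, the new
	 * check prints both cache names, WARNs once and returns without
	 * touching cache_b's freelists.
	 */
	kmem_cache_free(cache_b, obj);

	/* The correct way to release the object. */
	kmem_cache_free(cache_a, obj);

	kmem_cache_destroy(cache_b);
	kmem_cache_destroy(cache_a);
}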
...@@ -3026,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) ...@@ -3026,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
} }
static int kmem_cache_open(struct kmem_cache *s, static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(void *))
{ {
memset(s, 0, kmem_size); s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->name = name;
s->ctor = ctor;
s->object_size = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
s->reserved = 0; s->reserved = 0;
if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
...@@ -3098,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s, ...@@ -3098,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
else else
s->cpu_partial = 30; s->cpu_partial = 30;
s->refcount = 1;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000; s->remote_node_defrag_ratio = 1000;
#endif #endif
...@@ -3106,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s, ...@@ -3106,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s,
goto error; goto error;
if (alloc_kmem_cache_cpus(s)) if (alloc_kmem_cache_cpus(s))
return 1; return 0;
free_kmem_cache_nodes(s); free_kmem_cache_nodes(s);
error: error:
if (flags & SLAB_PANIC) if (flags & SLAB_PANIC)
panic("Cannot create slab %s size=%lu realsize=%u " panic("Cannot create slab %s size=%lu realsize=%u "
"order=%u offset=%u flags=%lx\n", "order=%u offset=%u flags=%lx\n",
s->name, (unsigned long)size, s->size, oo_order(s->oo), s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
s->offset, flags); s->offset, flags);
return 0; return -EINVAL;
} }
/* /*
...@@ -3137,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, ...@@ -3137,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
sizeof(long), GFP_ATOMIC); sizeof(long), GFP_ATOMIC);
if (!map) if (!map)
return; return;
slab_err(s, page, "%s", text); slab_err(s, page, text, s->name);
slab_lock(page); slab_lock(page);
get_map(s, page, map); get_map(s, page, map);
...@@ -3169,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) ...@@ -3169,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
discard_slab(s, page); discard_slab(s, page);
} else { } else {
list_slab_objects(s, page, list_slab_objects(s, page,
"Objects remaining on kmem_cache_close()"); "Objects remaining in %s on kmem_cache_close()");
} }
} }
} }
...@@ -3182,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s) ...@@ -3182,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
int node; int node;
flush_all(s); flush_all(s);
free_percpu(s->cpu_slab);
/* Attempt to free all objects */ /* Attempt to free all objects */
for_each_node_state(node, N_NORMAL_MEMORY) { for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node); struct kmem_cache_node *n = get_node(s, node);
...@@ -3191,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s) ...@@ -3191,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
if (n->nr_partial || slabs_node(s, node)) if (n->nr_partial || slabs_node(s, node))
return 1; return 1;
} }
free_percpu(s->cpu_slab);
free_kmem_cache_nodes(s); free_kmem_cache_nodes(s);
return 0; return 0;
} }
/* int __kmem_cache_shutdown(struct kmem_cache *s)
* Close a cache and release the kmem_cache structure
* (must be used for caches created using kmem_cache_create)
*/
void kmem_cache_destroy(struct kmem_cache *s)
{ {
mutex_lock(&slab_mutex); int rc = kmem_cache_close(s);
s->refcount--;
if (!s->refcount) { if (!rc)
list_del(&s->list);
mutex_unlock(&slab_mutex);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
"still has objects.\n", s->name, __func__);
dump_stack();
}
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
sysfs_slab_remove(s); sysfs_slab_remove(s);
} else
mutex_unlock(&slab_mutex); return rc;
} }
EXPORT_SYMBOL(kmem_cache_destroy);
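With SLUB's private kmem_cache_destroy() removed, the allocator only exposes __kmem_cache_shutdown(); the generic destroy path is expected to live in common code, which is not part of this hunk. The sketch below is therefore an assumption about that caller: refcounting details, RCU handling and error reporting are simplified.

/*
 * Hypothetical common-code destroy path built on the per-allocator
 * __kmem_cache_shutdown() hook (simplified; not the actual slab_common.c).
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);
		if (__kmem_cache_shutdown(s))
			printk(KERN_ERR "kmem_cache_destroy %s: cache still has objects\n",
			       s->name);
	}
	mutex_unlock(&slab_mutex);
}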
/******************************************************************** /********************************************************************
* Kmalloc subsystem * Kmalloc subsystem
...@@ -3226,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy); ...@@ -3226,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches); EXPORT_SYMBOL(kmalloc_caches);
static struct kmem_cache *kmem_cache;
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif #endif
...@@ -3273,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name, ...@@ -3273,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
{ {
struct kmem_cache *s; struct kmem_cache *s;
s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
s->name = name;
s->size = s->object_size = size;
s->align = ARCH_KMALLOC_MINALIGN;
/* /*
* This function is called with IRQs disabled during early-boot on * This function is called with IRQs disabled during early-boot on
* single CPU so there's no need to take slab_mutex here. * single CPU so there's no need to take slab_mutex here.
*/ */
if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, if (kmem_cache_open(s, flags))
flags, NULL))
goto panic; goto panic;
list_add(&s->list, &slab_caches); list_add(&s->list, &slab_caches);
...@@ -3362,7 +3358,7 @@ void *__kmalloc(size_t size, gfp_t flags) ...@@ -3362,7 +3358,7 @@ void *__kmalloc(size_t size, gfp_t flags)
if (unlikely(ZERO_OR_NULL_PTR(s))) if (unlikely(ZERO_OR_NULL_PTR(s)))
return s; return s;
ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); ret = slab_alloc(s, flags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, flags); trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
...@@ -3405,7 +3401,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) ...@@ -3405,7 +3401,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
if (unlikely(ZERO_OR_NULL_PTR(s))) if (unlikely(ZERO_OR_NULL_PTR(s)))
return s; return s;
ret = slab_alloc(s, flags, node, _RET_IP_); ret = slab_alloc_node(s, flags, node, _RET_IP_);
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
...@@ -3482,7 +3478,7 @@ void kfree(const void *x) ...@@ -3482,7 +3478,7 @@ void kfree(const void *x)
if (unlikely(!PageSlab(page))) { if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page)); BUG_ON(!PageCompound(page));
kmemleak_free(x); kmemleak_free(x);
put_page(page); __free_pages(page, compound_order(page));
return; return;
} }
slab_free(page->slab, page, object, _RET_IP_); slab_free(page->slab, page, object, _RET_IP_);
...@@ -3719,12 +3715,12 @@ void __init kmem_cache_init(void) ...@@ -3719,12 +3715,12 @@ void __init kmem_cache_init(void)
slub_max_order = 0; slub_max_order = 0;
kmem_size = offsetof(struct kmem_cache, node) + kmem_size = offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *); nr_node_ids * sizeof(struct kmem_cache_node *);
/* Allocate two kmem_caches from the page allocator */ /* Allocate two kmem_caches from the page allocator */
kmalloc_size = ALIGN(kmem_size, cache_line_size()); kmalloc_size = ALIGN(kmem_size, cache_line_size());
order = get_order(2 * kmalloc_size); order = get_order(2 * kmalloc_size);
kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
/* /*
* Must first have the slab cache available for the allocations of the * Must first have the slab cache available for the allocations of the
...@@ -3733,9 +3729,10 @@ void __init kmem_cache_init(void) ...@@ -3733,9 +3729,10 @@ void __init kmem_cache_init(void)
*/ */
kmem_cache_node = (void *)kmem_cache + kmalloc_size; kmem_cache_node = (void *)kmem_cache + kmalloc_size;
kmem_cache_open(kmem_cache_node, "kmem_cache_node", kmem_cache_node->name = "kmem_cache_node";
sizeof(struct kmem_cache_node), kmem_cache_node->size = kmem_cache_node->object_size =
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); sizeof(struct kmem_cache_node);
kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
...@@ -3743,8 +3740,10 @@ void __init kmem_cache_init(void) ...@@ -3743,8 +3740,10 @@ void __init kmem_cache_init(void)
slab_state = PARTIAL; slab_state = PARTIAL;
temp_kmem_cache = kmem_cache; temp_kmem_cache = kmem_cache;
kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, kmem_cache->name = "kmem_cache";
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); kmem_cache->size = kmem_cache->object_size = kmem_size;
kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
memcpy(kmem_cache, temp_kmem_cache, kmem_size); memcpy(kmem_cache, temp_kmem_cache, kmem_size);
...@@ -3933,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size, ...@@ -3933,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
return NULL; return NULL;
} }
struct kmem_cache *__kmem_cache_create(const char *name, size_t size, struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *)) size_t align, unsigned long flags, void (*ctor)(void *))
{ {
struct kmem_cache *s; struct kmem_cache *s;
char *n;
s = find_mergeable(size, align, flags, name, ctor); s = find_mergeable(size, align, flags, name, ctor);
if (s) { if (s) {
...@@ -3951,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, ...@@ -3951,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
if (sysfs_slab_alias(s, name)) { if (sysfs_slab_alias(s, name)) {
s->refcount--; s->refcount--;
return NULL; s = NULL;
} }
return s;
} }
n = kstrdup(name, GFP_KERNEL); return s;
if (!n) }
return NULL;
s = kmalloc(kmem_size, GFP_KERNEL); int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
if (s) { {
if (kmem_cache_open(s, n, int err;
size, align, flags, ctor)) {
int r;
list_add(&s->list, &slab_caches); err = kmem_cache_open(s, flags);
mutex_unlock(&slab_mutex); if (err)
r = sysfs_slab_add(s); return err;
mutex_lock(&slab_mutex);
if (!r) mutex_unlock(&slab_mutex);
return s; err = sysfs_slab_add(s);
mutex_lock(&slab_mutex);
list_del(&s->list); if (err)
kmem_cache_close(s); kmem_cache_close(s);
}
kfree(s); return err;
}
kfree(n);
return NULL;
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -4033,7 +4024,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) ...@@ -4033,7 +4024,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
if (unlikely(ZERO_OR_NULL_PTR(s))) if (unlikely(ZERO_OR_NULL_PTR(s)))
return s; return s;
ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); ret = slab_alloc(s, gfpflags, caller);
/* Honor the call site pointer we received. */ /* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags); trace_kmalloc(caller, ret, size, s->size, gfpflags);
...@@ -4063,7 +4054,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, ...@@ -4063,7 +4054,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
if (unlikely(ZERO_OR_NULL_PTR(s))) if (unlikely(ZERO_OR_NULL_PTR(s)))
return s; return s;
ret = slab_alloc(s, gfpflags, node, caller); ret = slab_alloc_node(s, gfpflags, node, caller);
/* Honor the call site pointer we received. */ /* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
...@@ -5210,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj, ...@@ -5210,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
return err; return err;
} }
static void kmem_cache_release(struct kobject *kobj)
{
struct kmem_cache *s = to_slab(kobj);
kfree(s->name);
kfree(s);
}
static const struct sysfs_ops slab_sysfs_ops = { static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show, .show = slab_attr_show,
.store = slab_attr_store, .store = slab_attr_store,
...@@ -5225,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = { ...@@ -5225,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
static struct kobj_type slab_ktype = { static struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops, .sysfs_ops = &slab_sysfs_ops,
.release = kmem_cache_release
}; };
static int uevent_filter(struct kset *kset, struct kobject *kobj) static int uevent_filter(struct kset *kset, struct kobject *kobj)
......
...@@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len) ...@@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len)
} }
EXPORT_SYMBOL(memdup_user); EXPORT_SYMBOL(memdup_user);
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
gfp_t flags)
{
void *ret;
size_t ks = 0;
if (p)
ks = ksize(p);
if (ks >= new_size)
return (void *)p;
ret = kmalloc_track_caller(new_size, flags);
if (ret && p)
memcpy(ret, p, ks);
return ret;
}
/** /**
* __krealloc - like krealloc() but don't free @p. * __krealloc - like krealloc() but don't free @p.
* @p: object to reallocate memory for. * @p: object to reallocate memory for.
...@@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user); ...@@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user);
*/ */
void *__krealloc(const void *p, size_t new_size, gfp_t flags) void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{ {
void *ret;
size_t ks = 0;
if (unlikely(!new_size)) if (unlikely(!new_size))
return ZERO_SIZE_PTR; return ZERO_SIZE_PTR;
if (p) return __do_krealloc(p, new_size, flags);
ks = ksize(p);
if (ks >= new_size)
return (void *)p;
ret = kmalloc_track_caller(new_size, flags);
if (ret && p)
memcpy(ret, p, ks);
return ret;
} }
EXPORT_SYMBOL(__krealloc); EXPORT_SYMBOL(__krealloc);
...@@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) ...@@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
return ZERO_SIZE_PTR; return ZERO_SIZE_PTR;
} }
ret = __krealloc(p, new_size, flags); ret = __do_krealloc(p, new_size, flags);
if (ret && p != ret) if (ret && p != ret)
kfree(p); kfree(p);
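Both __krealloc() and krealloc() now funnel through __do_krealloc(), which uses kmalloc_track_caller() so the new buffer is attributed to the original call site rather than to util.c. A short usage sketch of krealloc() follows; the helper name example_krealloc_usage() and the buffer sizes are illustrative. __krealloc() differs only in that it never frees the old buffer, leaving ownership with the caller.

/* Illustrative krealloc() usage; sizes are arbitrary. */
static int example_krealloc_usage(void)
{
	char *buf, *bigger;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* krealloc() frees 'buf' itself if the data had to move. */
	bigger = krealloc(buf, 256, GFP_KERNEL);
	if (!bigger) {
		kfree(buf);	/* on failure the old buffer is untouched */
		return -ENOMEM;
	}

	kfree(bigger);
	return 0;
}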
......