Commit f4178cdd authored by Pekka Enberg

Merge branch 'slab/common-for-cgroups' into slab/for-linus

Fix up a trivial conflict with NUMA_NO_NODE cleanups.

Conflicts:
	mm/slob.c
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parents 023dc704 f28510d3
[diff for one file is collapsed and not shown]
mm/slab.h
@@ -25,9 +25,26 @@ extern enum slab_state slab_state;
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;
/* The list of all slab caches on the system */
extern struct list_head slab_caches;
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
int __kmem_cache_shutdown(struct kmem_cache *);
#endif
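The header change above fixes the interface for the whole series: the common slab layer now owns allocation and registration of struct kmem_cache, and each allocator only initializes the preallocated descriptor through __kmem_cache_create(), returning 0 or a negative errno; __kmem_cache_alias() (SLUB only) and __kmem_cache_shutdown() complete the hook set. A minimal userspace model of that contract — the fields mirror struct kmem_cache, but this is an illustrative sketch, not kernel code:

#include <errno.h>
#include <stddef.h>

struct kmem_cache {		/* simplified stand-in for the real struct */
	const char *name;
	size_t object_size;
	size_t size;
	size_t align;
	unsigned long flags;
	void (*ctor)(void *);
	int refcount;
};

/* The allocator no longer allocates the descriptor; it only fills in
 * allocator-specific state for a descriptor handed to it, and reports
 * failure as -errno instead of returning NULL. */
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
	if (!s->size)
		return -EINVAL;		/* reject a degenerate cache */
	s->flags = flags;
	if (s->align < sizeof(void *))
		s->align = sizeof(void *);	/* enforce a minimal alignment */
	return 0;
}

/* An allocator that cannot track live objects just reports success,
 * as SLOB's __kmem_cache_shutdown() does later in this diff. */
int __kmem_cache_shutdown(struct kmem_cache *s)
{
	(void)s;
	return 0;
}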
mm/slab_common.c
@@ -22,6 +22,7 @@
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
@@ -98,21 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s = NULL;
int err = 0;
get_online_cpus();
mutex_lock(&slab_mutex);
if (kmem_cache_sanity_check(name, size) == 0)
s = __kmem_cache_create(name, size, align, flags, ctor);
if (!kmem_cache_sanity_check(name, size) == 0)
goto out_locked;
s = __kmem_cache_alias(name, size, align, flags, ctor);
if (s)
goto out_locked;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (s) {
s->object_size = s->size = size;
s->align = align;
s->ctor = ctor;
s->name = kstrdup(name, GFP_KERNEL);
if (!s->name) {
kmem_cache_free(kmem_cache, s);
err = -ENOMEM;
goto out_locked;
}
err = __kmem_cache_create(s, flags);
if (!err) {
s->refcount = 1;
list_add(&s->list, &slab_caches);
} else {
kfree(s->name);
kmem_cache_free(kmem_cache, s);
}
} else
err = -ENOMEM;
out_locked:
mutex_unlock(&slab_mutex);
put_online_cpus();
if (!s && (flags & SLAB_PANIC))
panic("kmem_cache_create: Failed to create slab '%s'\n", name);
if (err) {
if (flags & SLAB_PANIC)
panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
name, err);
else {
printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
name, err);
dump_stack();
}
return NULL;
}
return s;
}
EXPORT_SYMBOL(kmem_cache_create);
void kmem_cache_destroy(struct kmem_cache *s)
{
get_online_cpus();
mutex_lock(&slab_mutex);
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
if (!__kmem_cache_shutdown(s)) {
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
kfree(s->name);
kmem_cache_free(kmem_cache, s);
} else {
list_add(&s->list, &slab_caches);
printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
s->name);
dump_stack();
}
}
mutex_unlock(&slab_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
int slab_is_available(void)
{
return slab_state >= UP;
......
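With creation and destruction centralized in common code, callers of the public API are unchanged; what changes is the failure behaviour (error codes in the panic/warning messages, and the recovery path that puts a still-populated cache back on slab_caches). A hypothetical module exercising those paths — struct foo and foo_cache are invented for illustration:

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int id;
	char payload[60];
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* Without SLAB_PANIC, failure now returns NULL and logs the
	 * KERN_WARNING plus dump_stack() shown above. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* If any object were still allocated, the common
	 * kmem_cache_destroy() would re-add the cache to slab_caches
	 * and log "Slab cache still has objects". */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");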
mm/slob.c
@@ -529,23 +529,15 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
struct kmem_cache *c;
size_t align = c->size;
c = slob_alloc(sizeof(struct kmem_cache),
GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
if (c) {
c->name = name;
c->size = size;
if (flags & SLAB_DESTROY_BY_RCU) {
/* leave room for rcu footer at the end of object */
c->size += sizeof(struct slob_rcu);
}
c->flags = flags;
c->ctor = ctor;
/* ignore alignment unless it's forced */
c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < ARCH_SLAB_MINALIGN)
@@ -553,20 +545,8 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
if (c->align < align)
c->align = align;
kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
c->refcount = 1;
}
return c;
}
void kmem_cache_destroy(struct kmem_cache *c)
{
kmemleak_free(c);
if (c->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
slob_free(c, sizeof(struct kmem_cache));
return 0;
}
EXPORT_SYMBOL(kmem_cache_destroy);
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
@@ -634,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
}
EXPORT_SYMBOL(kmem_cache_size);
int __kmem_cache_shutdown(struct kmem_cache *c)
{
/* No way to check for remaining objects */
return 0;
}
int kmem_cache_shrink(struct kmem_cache *d)
{
return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
struct kmem_cache kmem_cache_boot = {
.name = "kmem_cache",
.size = sizeof(struct kmem_cache),
.flags = SLAB_PANIC,
.align = ARCH_KMALLOC_MINALIGN,
};
void __init kmem_cache_init(void)
{
kmem_cache = &kmem_cache_boot;
slab_state = UP;
}
......
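SLOB keeps no per-object accounting, so its __kmem_cache_shutdown() can only claim success, and the new statically allocated kmem_cache_boot sidesteps the chicken-and-egg problem of allocating the descriptor cache's own descriptor. A userspace sketch of that bootstrap idea (layout heavily simplified, illustrative only):

#include <stddef.h>
#include <stdio.h>

struct kmem_cache {		/* simplified descriptor */
	const char *name;
	size_t size;
};

/* Static storage: usable before any allocator is up. */
static struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
};

static struct kmem_cache *kmem_cache;	/* the cache of caches */

static void kmem_cache_init(void)
{
	/* No allocation needed: point the global at the boot object.
	 * Descriptors for later caches come out of this cache. */
	kmem_cache = &kmem_cache_boot;
}

int main(void)
{
	kmem_cache_init();
	printf("%s: %zu bytes per descriptor\n",
	       kmem_cache->name, kmem_cache->size);
	return 0;
}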
mm/slub.c
@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
kfree(s->name);
kfree(s);
}
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif
@@ -626,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
print_trailer(s, page, object);
}
static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
{
va_list args;
char buf[100];
@@ -2627,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
page = virt_to_head_page(x);
if (kmem_cache_debug(s) && page->slab != s) {
pr_err("kmem_cache_free: Wrong slab cache. %s but object"
" is from %s\n", page->slab->name, s->name);
WARN_ON_ONCE(1);
return;
}
slab_free(s, page, x, _RET_IP_);
trace_kmem_cache_free(_RET_IP_, x);
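The new check above turns a cross-cache free from silent freelist corruption into a logged no-op when debugging is enabled. A deliberately buggy sketch of the case it catches — a_cache and b_cache are hypothetical caches:

#include <linux/slab.h>

static void wrong_cache_free(struct kmem_cache *a_cache,
			     struct kmem_cache *b_cache)
{
	void *obj = kmem_cache_alloc(a_cache, GFP_KERNEL);

	if (!obj)
		return;
	/* Bug: obj lives in a page owned by a_cache, so page->slab !=
	 * b_cache. With kmem_cache_debug() the mismatch is printed and
	 * the free is skipped rather than corrupting b_cache. */
	kmem_cache_free(b_cache, obj);
}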
@@ -3041,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
}
static int kmem_cache_open(struct kmem_cache *s,
const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(void *))
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
{
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
s->object_size = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->reserved = 0;
if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3113,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
else
s->cpu_partial = 30;
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
#endif
@@ -3121,16 +3115,16 @@
goto error;
if (alloc_kmem_cache_cpus(s))
return 1;
return 0;
free_kmem_cache_nodes(s);
error:
if (flags & SLAB_PANIC)
panic("Cannot create slab %s size=%lu realsize=%u "
"order=%u offset=%u flags=%lx\n",
s->name, (unsigned long)size, s->size, oo_order(s->oo),
s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
s->offset, flags);
return 0;
return -EINVAL;
}
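Note the inverted return convention: kmem_cache_open() used to return 1 on success and 0 on failure; it now returns 0 on success and a negative errno, so the common code can report why creation failed. A trivial sketch of the convention (the condition is hypothetical):

#include <errno.h>
#include <stdbool.h>

static bool resources_available;	/* hypothetical precondition */

/* 0 on success, -errno on failure: the caller can forward the cause
 * instead of collapsing every failure into "returned NULL". */
static int setup(void)
{
	if (!resources_available)
		return -ENOMEM;
	return 0;
}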
/*
@@ -3152,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
sizeof(long), GFP_ATOMIC);
if (!map)
return;
slab_err(s, page, "%s", text);
slab_err(s, page, text, s->name);
slab_lock(page);
get_map(s, page, map);
@@ -3184,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
discard_slab(s, page);
} else {
list_slab_objects(s, page,
"Objects remaining on kmem_cache_close()");
"Objects remaining in %s on kmem_cache_close()");
}
}
}
@@ -3197,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
int node;
flush_all(s);
free_percpu(s->cpu_slab);
/* Attempt to free all objects */
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
@@ -3206,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
if (n->nr_partial || slabs_node(s, node))
return 1;
}
free_percpu(s->cpu_slab);
free_kmem_cache_nodes(s);
return 0;
}
/*
* Close a cache and release the kmem_cache structure
* (must be used for caches created using kmem_cache_create)
*/
void kmem_cache_destroy(struct kmem_cache *s)
int __kmem_cache_shutdown(struct kmem_cache *s)
{
mutex_lock(&slab_mutex);
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
mutex_unlock(&slab_mutex);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
"still has objects.\n", s->name, __func__);
dump_stack();
}
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
int rc = kmem_cache_close(s);
if (!rc)
sysfs_slab_remove(s);
} else
mutex_unlock(&slab_mutex);
return rc;
}
EXPORT_SYMBOL(kmem_cache_destroy);
/********************************************************************
* Kmalloc subsystem
@@ -3241,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches);
static struct kmem_cache *kmem_cache;
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif
@@ -3288,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
{
struct kmem_cache *s;
s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
s->name = name;
s->size = s->object_size = size;
s->align = ARCH_KMALLOC_MINALIGN;
/*
* This function is called with IRQs disabled during early-boot on
* single CPU so there's no need to take slab_mutex here.
*/
if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL))
if (kmem_cache_open(s, flags))
goto panic;
list_add(&s->list, &slab_caches);
@@ -3739,7 +3720,7 @@ void __init kmem_cache_init(void)
/* Allocate two kmem_caches from the page allocator */
kmalloc_size = ALIGN(kmem_size, cache_line_size());
order = get_order(2 * kmalloc_size);
kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
/*
* Must first have the slab cache available for the allocations of the
@@ -3748,9 +3729,10 @@
*/
kmem_cache_node = (void *)kmem_cache + kmalloc_size;
kmem_cache_open(kmem_cache_node, "kmem_cache_node",
sizeof(struct kmem_cache_node),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
kmem_cache_node->name = "kmem_cache_node";
kmem_cache_node->size = kmem_cache_node->object_size =
sizeof(struct kmem_cache_node);
kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
@@ -3758,8 +3740,10 @@
slab_state = PARTIAL;
temp_kmem_cache = kmem_cache;
kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
kmem_cache->name = "kmem_cache";
kmem_cache->size = kmem_cache->object_size = kmem_size;
kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
memcpy(kmem_cache, temp_kmem_cache, kmem_size);
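This is the classic SLUB self-hosting dance: kmem_cache and kmem_cache_node start out in raw pages (now zeroed with __GFP_ZERO, since kmem_cache_open() no longer memsets the descriptor), and once the descriptor cache works it allocates its own permanent copy, which the memcpy above fills in. A userspace model of the two phases (illustrative, heavily simplified):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kmem_cache {		/* simplified descriptor */
	const char *name;
	size_t size;
};

/* Stand-in for kmem_cache_alloc(kmem_cache, GFP_NOWAIT): hand out
 * zeroed memory of the cache's object size. */
static void *cache_alloc(struct kmem_cache *c)
{
	return calloc(1, c->size);
}

int main(void)
{
	/* Phase 1: a temporary descriptor in "page allocator" memory. */
	struct kmem_cache boot = { "kmem_cache", sizeof(struct kmem_cache) };
	struct kmem_cache *temp = &boot;

	/* Phase 2: the cache allocates its own permanent descriptor,
	 * then the temporary copy is cloned into it. */
	struct kmem_cache *final = cache_alloc(temp);

	if (!final)
		return 1;
	memcpy(final, temp, temp->size);
	printf("%s is now self-hosted\n", final->name);
	free(final);
	return 0;
}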
@@ -3948,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
return NULL;
}
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
char *n;
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -3966,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
if (sysfs_slab_alias(s, name)) {
s->refcount--;
return NULL;
s = NULL;
}
return s;
}
n = kstrdup(name, GFP_KERNEL);
if (!n)
return NULL;
return s;
}
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
int r;
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
int err;
err = kmem_cache_open(s, flags);
if (err)
return err;
list_add(&s->list, &slab_caches);
mutex_unlock(&slab_mutex);
r = sysfs_slab_add(s);
err = sysfs_slab_add(s);
mutex_lock(&slab_mutex);
if (!r)
return s;
list_del(&s->list);
if (err)
kmem_cache_close(s);
}
kfree(s);
}
kfree(n);
return NULL;
return err;
}
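__kmem_cache_alias() lets SLUB satisfy a create request with an existing mergeable cache by bumping its refcount rather than registering a duplicate; only when no alias is found does the common code allocate a fresh descriptor and call __kmem_cache_create(), which here registers with sysfs while slab_mutex is temporarily dropped. A minimal model of the merge decision (sketch; the real find_mergeable() also compares ctors, alignment and debug flags):

#include <stddef.h>

struct cache {
	const char *name;
	size_t size;
	unsigned long flags;
	int refcount;
};

static struct cache caches[16];		/* toy stand-in for slab_caches */
static int nr_caches;

static struct cache *cache_alias(size_t size, unsigned long flags)
{
	int i;

	for (i = 0; i < nr_caches; i++) {
		struct cache *s = &caches[i];

		/* Mergeable here: big enough and identical flags. */
		if (s->size >= size && s->flags == flags) {
			s->refcount++;		/* reuse, don't duplicate */
			return s;
		}
	}
	return NULL;	/* caller falls back to creating a new cache */
}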
#ifdef CONFIG_SMP
@@ -5225,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
return err;
}
static void kmem_cache_release(struct kobject *kobj)
{
struct kmem_cache *s = to_slab(kobj);
kfree(s->name);
kfree(s);
}
static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
@@ -5240,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
static struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
.release = kmem_cache_release
};
static int uevent_filter(struct kset *kset, struct kobject *kobj)
......