Commit 5722d094 authored by Vladimir Davydov, committed by Linus Torvalds

memcg, slab: cleanup memcg cache creation

This patch cleans up the memcg cache creation path as follows:

- Move memcg cache name creation to a separate function to be called
  from kmem_cache_create_memcg().  This allows us to get rid of the mutex
  protecting the temporary buffer used for the name formatting, because
  the whole cache creation path is protected by the slab_mutex.

- Get rid of memcg_create_kmem_cache().  This function serves as a proxy
  to kmem_cache_create_memcg().  After separating the cache name creation
  path, it would be reduced to a function call, so let's inline it.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a44cb944
...@@ -491,6 +491,9 @@ void __memcg_kmem_commit_charge(struct page *page, ...@@ -491,6 +491,9 @@ void __memcg_kmem_commit_charge(struct page *page,
void __memcg_kmem_uncharge_pages(struct page *page, int order); void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg); int memcg_cache_id(struct mem_cgroup *memcg);
char *memcg_create_cache_name(struct mem_cgroup *memcg,
struct kmem_cache *root_cache);
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache); struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s); void memcg_free_cache_params(struct kmem_cache *s);
...@@ -635,6 +638,12 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) ...@@ -635,6 +638,12 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1; return -1;
} }
/*
 * Stub used when kmem accounting is compiled out (this sits in the same
 * inline-stub section as the memcg_cache_id() stub above, which returns -1):
 * no per-memcg cache is ever created, so no name is needed.
 */
static inline char *memcg_create_cache_name(struct mem_cgroup *memcg,
struct kmem_cache *root_cache)
{
return NULL;
}
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
struct kmem_cache *s, struct kmem_cache *root_cache) struct kmem_cache *s, struct kmem_cache *root_cache)
{ {
......
...@@ -3094,6 +3094,29 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) ...@@ -3094,6 +3094,29 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
return 0; return 0;
} }
/*
 * Build the name of a per-memcg cache: "<root_cache>(<memcg id>:<cgroup>)".
 *
 * A single lazily-allocated static buffer is used to snapshot the cgroup
 * name.  Sharing it across calls is safe because cache creation runs under
 * the slab_mutex, which we assert here instead of taking a private mutex.
 *
 * Returns a kasprintf()-allocated string (caller frees), or NULL if either
 * allocation fails.
 */
char *memcg_create_cache_name(struct mem_cgroup *memcg,
			      struct kmem_cache *root_cache)
{
	static char *cgrp_name_buf;

	lockdep_assert_held(&slab_mutex);

	if (!cgrp_name_buf) {
		cgrp_name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
		if (!cgrp_name_buf)
			return NULL;
	}

	cgroup_name(memcg->css.cgroup, cgrp_name_buf, NAME_MAX + 1);

	return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			 memcg_cache_id(memcg), cgrp_name_buf);
}
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache) struct kmem_cache *root_cache)
{ {
...@@ -3298,46 +3321,6 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep) ...@@ -3298,46 +3321,6 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
schedule_work(&cachep->memcg_params->destroy); schedule_work(&cachep->memcg_params->destroy);
} }
/*
 * Create the per-memcg child of root cache @s for @memcg.
 *
 * Returns the newly created cache, or @s itself if creation failed, so the
 * caller always gets a usable cache back.  The name is formatted as
 * "<root>(<memcg id>:<cgroup name>)" in shared static scratch buffers,
 * serialized by the local @mutex (creation is rare, so a single shared
 * buffer avoids repeated short-lived allocations).
 */
static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
struct kmem_cache *s)
{
struct kmem_cache *new = NULL;
/* lazily-allocated scratch buffers, shared across calls */
static char *tmp_path = NULL, *tmp_name = NULL;
static DEFINE_MUTEX(mutex); /* protects tmp_name */
BUG_ON(!memcg_can_account_kmem(memcg));
mutex_lock(&mutex);
/*
 * kmem_cache_create_memcg duplicates the given name and
 * cgroup_name for this name requires RCU context.
 * This static temporary buffer is used to prevent from
 * pointless shortliving allocation.
 */
if (!tmp_path || !tmp_name) {
if (!tmp_path)
tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
if (!tmp_name)
tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
/* on allocation failure fall through with new == NULL -> return s */
if (!tmp_path || !tmp_name)
goto out;
}
cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
memcg_cache_id(memcg), tmp_name);
/* SLAB_PANIC is stripped: failure here must not panic the kernel */
new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
(s->flags & ~SLAB_PANIC), s->ctor, s);
if (new)
new->allocflags |= __GFP_KMEMCG;
else
new = s; /* fall back to the root cache on failure */
out:
mutex_unlock(&mutex);
return new;
}
void kmem_cache_destroy_memcg_children(struct kmem_cache *s) void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{ {
struct kmem_cache *c; struct kmem_cache *c;
...@@ -3384,12 +3367,6 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s) ...@@ -3384,12 +3367,6 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
mutex_unlock(&activate_kmem_mutex); mutex_unlock(&activate_kmem_mutex);
} }
/* Deferred request to create @memcg's private copy of root cache @cachep. */
struct create_work {
struct mem_cgroup *memcg;   /* target memcg; a css reference is held */
struct kmem_cache *cachep;  /* root cache to clone */
struct work_struct work;    /* queued on the system workqueue */
};
static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
...@@ -3407,13 +3384,25 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) ...@@ -3407,13 +3384,25 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
mutex_unlock(&memcg->slab_caches_mutex); mutex_unlock(&memcg->slab_caches_mutex);
} }
/* Deferred request to create @memcg's private copy of root cache @cachep. */
struct create_work {
struct mem_cgroup *memcg;   /* target memcg; a css reference is held */
struct kmem_cache *cachep;  /* root cache to clone */
struct work_struct work;    /* queued on the system workqueue */
};
static void memcg_create_cache_work_func(struct work_struct *w) static void memcg_create_cache_work_func(struct work_struct *w)
{ {
struct create_work *cw; struct create_work *cw = container_of(w, struct create_work, work);
struct mem_cgroup *memcg = cw->memcg;
struct kmem_cache *cachep = cw->cachep;
struct kmem_cache *new;
cw = container_of(w, struct create_work, work); new = kmem_cache_create_memcg(memcg, cachep->name,
memcg_create_kmem_cache(cw->memcg, cw->cachep); cachep->object_size, cachep->align,
css_put(&cw->memcg->css); cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep);
if (new)
new->allocflags |= __GFP_KMEMCG;
css_put(&memcg->css);
kfree(cw); kfree(cw);
} }
......
...@@ -215,7 +215,10 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, ...@@ -215,7 +215,10 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
s->align = calculate_alignment(flags, align, size); s->align = calculate_alignment(flags, align, size);
s->ctor = ctor; s->ctor = ctor;
s->name = kstrdup(name, GFP_KERNEL); if (memcg)
s->name = memcg_create_cache_name(memcg, parent_cache);
else
s->name = kstrdup(name, GFP_KERNEL);
if (!s->name) if (!s->name)
goto out_free_cache; goto out_free_cache;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment