Commit d797b7d0 authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: simplify memcg cache creation

Because the number of non-root kmem_caches no longer depends on the number
of memory cgroups and is generally not very big, there is no longer any
need for a dedicated workqueue.

Also, since memcg_create_kmem_cache() no longer needs any argument except
the root kmem_cache, the work structure can be embedded into the
kmem_cache, avoiding a dynamic allocation of the work structure.
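
A rough sketch of the resulting layout (simplified from the diff below):
embedding the work item lets the handler recover the root kmem_cache with
container_of() instead of reading it from a heap-allocated wrapper that
must later be freed:

struct memcg_cache_params {
	/* other fields omitted for brevity */
	struct work_struct work;	/* embedded: no per-request kmalloc()/kfree() */
};

static void memcg_kmem_cache_create_func(struct work_struct *work)
{
	/* the work item lives inside the kmem_cache, so container_of() finds it */
	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
						 memcg_params.work);

	memcg_create_kmem_cache(cachep);
}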

This also simplifies the synchronization: each root kmem_cache has exactly
one work item, so there can be no concurrent attempts to create a non-root
kmem_cache for a given root kmem_cache; the second and all following
attempts to queue the work simply fail.
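
This leans on a standard workqueue property: queue_work() returns false and
does nothing if the work item is already pending. A brief illustration of
the idea (not part of the patch itself):

	/*
	 * Only the first request actually queues the work; later callers see
	 * the pending bit and return immediately, so at most one creation of
	 * the non-root cache is ever in flight per root kmem_cache.
	 */
	if (!queue_work(system_wq, &cachep->memcg_params.work)) {
		/* creation already scheduled by a concurrent allocation */
	}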

On the kmem_cache destruction path there is no longer a need to call the
expensive flush_workqueue() and wait for all pending work items to finish.
Instead, cancel_work_sync() can cancel or wait for just this one work item.
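
A minimal sketch of the destruction-path helper, matching the renamed
function in the diff below:

static void cancel_memcg_cache_creation(struct kmem_cache *s)
{
	/* cancel the single pending creation work, or wait for it to complete */
	cancel_work_sync(&s->memcg_params.work);
}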
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-14-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9855609b
@@ -1418,7 +1418,6 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
 
 extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
@@ -399,8 +399,6 @@ void memcg_put_cache_ids(void)
  */
 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
-
-struct workqueue_struct *memcg_kmem_cache_wq;
 #endif
 
 static int memcg_shrinker_map_size;
@@ -2902,39 +2900,6 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
-struct memcg_kmem_cache_create_work {
-	struct kmem_cache *cachep;
-	struct work_struct work;
-};
-
-static void memcg_kmem_cache_create_func(struct work_struct *w)
-{
-	struct memcg_kmem_cache_create_work *cw =
-		container_of(w, struct memcg_kmem_cache_create_work, work);
-	struct kmem_cache *cachep = cw->cachep;
-
-	memcg_create_kmem_cache(cachep);
-
-	kfree(cw);
-}
-
-/*
- * Enqueue the creation of a per-memcg kmem_cache.
- */
-static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep)
-{
-	struct memcg_kmem_cache_create_work *cw;
-
-	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-	if (!cw)
-		return;
-
-	cw->cachep = cachep;
-	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
-
-	queue_work(memcg_kmem_cache_wq, &cw->work);
-}
-
 /**
  * memcg_kmem_get_cache: select memcg or root cache for allocation
  * @cachep: the original global kmem cache
@@ -2951,7 +2916,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
 
 	if (unlikely(!memcg_cachep)) {
-		memcg_schedule_kmem_cache_create(cachep);
+		queue_work(system_wq, &cachep->memcg_params.work);
 		return cachep;
 	}
@@ -7022,17 +6987,6 @@ static int __init mem_cgroup_init(void)
 {
 	int cpu, node;
 
-#ifdef CONFIG_MEMCG_KMEM
-	/*
-	 * Kmem cache creation is mostly done with the slab_mutex held,
-	 * so use a workqueue with limited concurrency to avoid stalling
-	 * all worker threads in case lots of cgroups are created and
-	 * destroyed simultaneously.
-	 */
-	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
-	BUG_ON(!memcg_kmem_cache_wq);
-#endif
-
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCG_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
@@ -45,12 +45,14 @@ struct kmem_cache {
  * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
  *		 cgroups.
  * @root_caches_node: list node for slab_root_caches list.
+ * @work: work struct used to create the non-root cache.
  */
 struct memcg_cache_params {
	struct kmem_cache *root_cache;
 
	struct kmem_cache *memcg_cache;
	struct list_head __root_caches_node;
+	struct work_struct work;
 };
 #endif /* CONFIG_SLOB */
@@ -134,10 +134,18 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 
 LIST_HEAD(slab_root_caches);
 
+static void memcg_kmem_cache_create_func(struct work_struct *work)
+{
+	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
+						 memcg_params.work);
+	memcg_create_kmem_cache(cachep);
+}
+
 void slab_init_memcg_params(struct kmem_cache *s)
 {
	s->memcg_params.root_cache = NULL;
	s->memcg_params.memcg_cache = NULL;
+	INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func);
 }
 
 static void init_memcg_params(struct kmem_cache *s,
@@ -586,15 +594,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 	return 0;
 }
 
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void cancel_memcg_cache_creation(struct kmem_cache *s)
 {
-	/*
-	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
-	 * deactivates the memcg kmem_caches through workqueue. Make sure all
-	 * previous workitems on workqueue are processed.
-	 */
-	if (likely(memcg_kmem_cache_wq))
-		flush_workqueue(memcg_kmem_cache_wq);
+	cancel_work_sync(&s->memcg_params.work);
 }
 #else
 static inline int shutdown_memcg_caches(struct kmem_cache *s)
@@ -602,7 +604,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
 	return 0;
 }
 
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
+static inline void cancel_memcg_cache_creation(struct kmem_cache *s)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
@@ -621,7 +623,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
-	flush_memcg_workqueue(s);
+	cancel_memcg_cache_creation(s);
 
 	get_online_cpus();
 	get_online_mems();