Commit c7094406 authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: deprecate slab_root_caches

Currently there are two lists of kmem_caches:
1) slab_caches, which contains all kmem_caches,
2) slab_root_caches, which contains only root kmem_caches.
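
For illustration, the dual membership looks like this (struct bodies
trimmed to the relevant members, field names as in the hunks below):

	struct kmem_cache {
		struct list_head list;		/* node on slab_caches */
		struct memcg_cache_params memcg_params;
		/* ... */
	};

	struct memcg_cache_params {
		struct list_head __root_caches_node;	/* node on slab_root_caches,
							   root caches only */
		/* ... */
	};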

Some preprocessor magic collapses the two into a single list when
CONFIG_MEMCG_KMEM isn't enabled.
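
Concretely, the aliasing being removed lives in mm/slab.h (quoted from
the hunks below):

	#ifdef CONFIG_MEMCG_KMEM
	/* List of all root caches. */
	extern struct list_head	slab_root_caches;
	#define root_caches_node	memcg_params.__root_caches_node
	#else
	/* If !memcg, all caches are root. */
	#define slab_root_caches	slab_caches
	#define root_caches_node	list
	#endif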

The separate root list was needed earlier because the number of non-root
kmem_caches was proportional to the number of memory cgroups and could
reach really big values.  Now that it cannot exceed the number of root
kmem_caches, there is no reason to maintain two lists.

We never iterate over the slab_root_caches list on any hot path, so it's
perfectly fine to iterate over slab_caches and filter out non-root
kmem_caches.
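
A minimal sketch of the resulting pattern (mirroring the slab_show()
change below; the seq_file plumbing is omitted):

	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;
		/* act on the root cache, e.g. cache_show(s, m) */
	}
	mutex_unlock(&slab_mutex);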

This allows us to remove a lot of config-dependent code and two pointers
(the embedded __root_caches_node list_head) from the kmem_cache
structure.
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-16-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 272911a4
diff --git a/mm/slab.c b/mm/slab.c
@@ -1249,7 +1249,6 @@ void __init kmem_cache_init(void)
 				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN, 0, 0);
 	list_add(&kmem_cache->list, &slab_caches);
-	memcg_link_cache(kmem_cache);
 	slab_state = PARTIAL;
 
 	/*
diff --git a/mm/slab.h b/mm/slab.h
@@ -44,14 +44,12 @@ struct kmem_cache {
  *
  * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
  *		cgroups.
- * @root_caches_node: list node for slab_root_caches list.
  * @work: work struct used to create the non-root cache.
  */
 struct memcg_cache_params {
 	struct kmem_cache *root_cache;
 
 	struct kmem_cache *memcg_cache;
-	struct list_head __root_caches_node;
 	struct work_struct work;
 };
 #endif /* CONFIG_SLOB */
@@ -265,11 +263,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-
-/* List of all root caches. */
-extern struct list_head	slab_root_caches;
-#define root_caches_node	memcg_params.__root_caches_node
-
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return !s->memcg_params.root_cache;
@@ -447,14 +440,8 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
-extern void memcg_link_cache(struct kmem_cache *s);
-
 #else /* CONFIG_MEMCG_KMEM */
-
-/* If !memcg, all caches are root. */
-#define slab_root_caches	slab_caches
-#define root_caches_node	list
-
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return true;
@@ -523,10 +510,6 @@ static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
 
-static inline void memcg_link_cache(struct kmem_cache *s)
-{
-}
-
 #endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)
diff --git a/mm/slab_common.c b/mm/slab_common.c
@@ -131,9 +131,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-
-LIST_HEAD(slab_root_caches);
-
 static void memcg_kmem_cache_create_func(struct work_struct *work)
 {
 	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
@@ -156,27 +153,11 @@ static void init_memcg_params(struct kmem_cache *s,
 	else
 		slab_init_memcg_params(s);
 }
-
-void memcg_link_cache(struct kmem_cache *s)
-{
-	if (is_root_cache(s))
-		list_add(&s->root_caches_node, &slab_root_caches);
-}
-
-static void memcg_unlink_cache(struct kmem_cache *s)
-{
-	if (is_root_cache(s))
-		list_del(&s->root_caches_node);
-}
 #else
 static inline void init_memcg_params(struct kmem_cache *s,
 				     struct kmem_cache *root_cache)
 {
 }
-
-static inline void memcg_unlink_cache(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
@@ -253,7 +234,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 	if (flags & SLAB_NEVER_MERGE)
 		return NULL;
 
-	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
+	list_for_each_entry_reverse(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
@@ -312,7 +293,6 @@ static struct kmem_cache *create_cache(const char *name,
 
 	s->refcount = 1;
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s);
 out:
 	if (err)
 		return ERR_PTR(err);
@@ -507,7 +487,6 @@ static int shutdown_cache(struct kmem_cache *s)
 	if (__kmem_cache_shutdown(s) != 0)
 		return -EBUSY;
 
-	memcg_unlink_cache(s);
 	list_del(&s->list);
 
 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
@@ -751,7 +730,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	create_boot_cache(s, name, size, flags, useroffset, usersize);
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s);
 	s->refcount = 1;
 	return s;
 }
@@ -1107,12 +1085,12 @@ static void print_slabinfo_header(struct seq_file *m)
 void *slab_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&slab_mutex);
-	return seq_list_start(&slab_root_caches, *pos);
+	return seq_list_start(&slab_caches, *pos);
 }
 
 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	return seq_list_next(p, &slab_root_caches, pos);
+	return seq_list_next(p, &slab_caches, pos);
 }
 
 void slab_stop(struct seq_file *m, void *p)
@@ -1165,11 +1143,12 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
 }
 
 static int slab_show(struct seq_file *m, void *p)
 {
-	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
+	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 
-	if (p == slab_root_caches.next)
+	if (p == slab_caches.next)
 		print_slabinfo_header(m);
-	cache_show(s, m);
+	if (is_root_cache(s))
+		cache_show(s, m);
 	return 0;
 }
@@ -1271,7 +1250,7 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused)
 	mutex_lock(&slab_mutex);
 	seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
 	seq_puts(m, " <active_slabs> <num_slabs>\n");
-	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
+	list_for_each_entry(s, &slab_caches, list) {
 		/*
 		 * Skip kmem caches that don't have the memcg cache.
 		 */
diff --git a/mm/slub.c b/mm/slub.c
@@ -4360,7 +4360,6 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 #endif
 	}
 	slab_init_memcg_params(s);
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s);
 	return s;
 }