Commit fb2f2b0a authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: reparent memcg kmem_caches on cgroup removal

Let's reparent non-root kmem_caches on memcg offlining.  This allows us to
release the memory cgroup without waiting for the last outstanding kernel
object (e.g. a dentry used by another application).

Since the parent cgroup is already charged, all we need to do is splice
the list of kmem_caches onto the parent's kmem_caches list, swap the
memcg pointer, drop the css refcounter for each kmem_cache and adjust the
parent's css refcounter.
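
Concretely, the reparenting step (mirroring the mm/slab_common.c hunk
below, with slab_mutex held) boils down to:

	/* Reparent every child kmem_cache of the offlined memcg. */
	nr_reparented = 0;
	list_for_each_entry(s, &memcg->kmem_caches,
			    memcg_params.kmem_caches_node) {
		/* Swap the memcg pointer to the parent... */
		WRITE_ONCE(s->memcg_params.memcg, parent);
		/* ...and drop the child's css reference. */
		css_put(&memcg->css);
		nr_reparented++;
	}
	if (nr_reparented) {
		/* Splice the whole list onto the parent... */
		list_splice_init(&memcg->kmem_caches,
				 &parent->kmem_caches);
		/* ...and take matching references on the parent's css. */
		css_get_many(&parent->css, nr_reparented);
	}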

Please note that kmem_cache->memcg_params.memcg isn't a stable pointer
anymore.  It's safe to read it under rcu_read_lock(), with cgroup_mutex
held, or in any other way that protects the memory cgroup from being
released.
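
For example, the charge path (memcg_charge_slab() in the mm/slab.h hunk
below) pins the possibly-reparented memcg like this:

	/* Read the unstable memcg pointer under RCU and pin the cgroup;
	 * if the child is already being released, walk up to the first
	 * ancestor whose css reference can still be taken. */
	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();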

We can race with the slab allocation and deallocation paths.  It's not a
big problem: the parent's charge and slab global stats are always correct,
and we no longer care about the child's usage and global stats.  The child
cgroup is already offline, so we don't use or show it anywhere.

Local slab stats (NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE) aren't
used anywhere except count_shadow_nodes().  But even there it won't break
anything: after reparenting, "nodes" will be 0 on the child level (because
we're already reparenting shrinker lists), and on the parent level the page
stats were always 0, so this patch doesn't change anything.

[guro@fb.com: properly handle kmem_caches reparented to root_mem_cgroup]
  Link: http://lkml.kernel.org/r/20190620213427.1691847-1-guro@fb.com
Link: http://lkml.kernel.org/r/20190611231813.3148843-11-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4d96ba35
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -152,7 +152,7 @@ void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 
 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3284,15 +3284,15 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	 */
 	memcg->kmem_state = KMEM_ALLOCATED;
 
-	memcg_deactivate_kmem_caches(memcg);
-
-	kmemcg_id = memcg->kmemcg_id;
-	BUG_ON(kmemcg_id < 0);
-
 	parent = parent_mem_cgroup(memcg);
 	if (!parent)
 		parent = root_mem_cgroup;
 
+	memcg_deactivate_kmem_caches(memcg, parent);
+
+	kmemcg_id = memcg->kmemcg_id;
+	BUG_ON(kmemcg_id < 0);
+
 	/*
 	 * Change kmemcg_id of this cgroup and all its descendants to the
 	 * parent's id, and then move all entries from this cgroup's list_lrus
@@ -3325,7 +3325,6 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
 	if (memcg->kmem_state == KMEM_ALLOCATED) {
 		WARN_ON(!list_empty(&memcg->kmem_caches));
 		static_branch_dec(&memcg_kmem_enabled_key);
-		WARN_ON(page_counter_read(&memcg->kmem));
 	}
 }
 #else
@@ -4773,6 +4772,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 
 	/* The following stuff does not apply to the root */
 	if (!parent) {
+#ifdef CONFIG_MEMCG_KMEM
+		INIT_LIST_HEAD(&memcg->kmem_caches);
+#endif
 		root_mem_cgroup = memcg;
 		return &memcg->css;
 	}
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,6 +261,9 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  * which do not have slab_cache pointer set.
  * So this function assumes that the page can pass PageHead() and PageSlab()
  * checks.
+ *
+ * The kmem_cache can be reparented asynchronously. The caller must ensure
+ * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
  */
 static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
 {
@@ -268,7 +271,7 @@ static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
 
 	s = READ_ONCE(page->slab_cache);
 	if (s && !is_root_cache(s))
-		return s->memcg_params.memcg;
+		return READ_ONCE(s->memcg_params.memcg);
 
 	return NULL;
 }
@@ -285,10 +288,22 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	struct lruvec *lruvec;
 	int ret;
 
-	memcg = s->memcg_params.memcg;
+	rcu_read_lock();
+	memcg = READ_ONCE(s->memcg_params.memcg);
+	while (memcg && !css_tryget_online(&memcg->css))
+		memcg = parent_mem_cgroup(memcg);
+	rcu_read_unlock();
+
+	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
+		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+				    (1 << order));
+		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+		return 0;
+	}
+
 	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
 	if (ret)
-		return ret;
+		goto out;
 
 	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
 	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
@@ -296,8 +311,9 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	/* transfer try_charge() page references to kmem_cache */
 	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
 	css_put_many(&memcg->css, 1 << order);
-
-	return 0;
+out:
+	css_put(&memcg->css);
+	return ret;
 }
 
 /*
@@ -310,10 +326,17 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 
-	memcg = s->memcg_params.memcg;
-	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
-	mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
-	memcg_kmem_uncharge_memcg(page, order, memcg);
+	rcu_read_lock();
+	memcg = READ_ONCE(s->memcg_params.memcg);
+	if (likely(!mem_cgroup_is_root(memcg))) {
+		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
+		memcg_kmem_uncharge_memcg(page, order, memcg);
+	} else {
+		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+				    -(1 << order));
+	}
+	rcu_read_unlock();
+
 	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
 }
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -252,7 +252,8 @@ static void memcg_unlink_cache(struct kmem_cache *s)
 	} else {
 		list_del(&s->memcg_params.children_node);
 		list_del(&s->memcg_params.kmem_caches_node);
-		css_put(&s->memcg_params.memcg->css);
+		mem_cgroup_put(s->memcg_params.memcg);
+		WRITE_ONCE(s->memcg_params.memcg, NULL);
 	}
 }
 #else
@@ -785,11 +786,13 @@ static void kmemcg_cache_deactivate(struct kmem_cache *s)
 	spin_unlock_irq(&memcg_kmem_wq_lock);
 }
 
-void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
+				  struct mem_cgroup *parent)
 {
 	int idx;
 	struct memcg_cache_array *arr;
 	struct kmem_cache *s, *c;
+	unsigned int nr_reparented;
 
 	idx = memcg_cache_id(memcg);
 
@@ -807,6 +810,18 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 		kmemcg_cache_deactivate(c);
 		arr->entries[idx] = NULL;
 	}
+	nr_reparented = 0;
+	list_for_each_entry(s, &memcg->kmem_caches,
+			    memcg_params.kmem_caches_node) {
+		WRITE_ONCE(s->memcg_params.memcg, parent);
+		css_put(&memcg->css);
+		nr_reparented++;
+	}
+	if (nr_reparented) {
+		list_splice_init(&memcg->kmem_caches,
+				 &parent->kmem_caches);
+		css_get_many(&parent->css, nr_reparented);
+	}
 	mutex_unlock(&slab_mutex);
 
 	put_online_mems();