Commit 10d5ebf4 authored by Li Zefan, committed by Linus Torvalds

memcg: use css_get/put when charging/uncharging kmem

Use css_get/put instead of mem_cgroup_get/put.

We can't do a simple replacement, because mem_cgroup_put() is currently
called from mem_cgroup_css_free(), while mem_cgroup_css_free() itself won't
be called until the css refcnt drops to 0.

Instead, we increment the css refcnt in mem_cgroup_css_offline() and then
check whether there are still outstanding kmem charges.  If not, the css
refcnt is decremented immediately; otherwise the reference is released after
the last kmem allocation is uncharged.
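
For illustration only, a minimal userspace model of that scheme follows.  The
fake_memcg struct and the *_model() helpers are simplified stand-ins for the
memcg/css machinery, not kernel APIs; C11 atomics stand in for the css
refcount, the kmem res_counter and the KMEM_ACCOUNTED_DEAD bit.

/*
 * Userspace sketch of the offline/uncharge reference pairing.
 * Illustrative stand-ins only -- not the kernel's memcg/css API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_memcg {
	atomic_int  css_refcnt;	/* stands in for the css reference count */
	atomic_long kmem_usage;	/* stands in for memcg->kmem RES_USAGE */
	atomic_bool kmem_dead;	/* stands in for KMEM_ACCOUNTED_DEAD */
};

static void css_put_model(struct fake_memcg *m)
{
	/* fetch_sub returns the old value; 1 means this was the last ref */
	if (atomic_fetch_sub(&m->css_refcnt, 1) == 1)
		printf("last reference dropped, memcg can be freed\n");
}

/* Models kmem_cgroup_css_offline(): pin the css, then mark kmem dead. */
static void offline_model(struct fake_memcg *m)
{
	atomic_fetch_add(&m->css_refcnt, 1);	/* css_get() */
	atomic_store(&m->kmem_dead, true);	/* memcg_kmem_mark_dead() */

	/* No outstanding kmem charges: drop the pin again right away. */
	if (atomic_load(&m->kmem_usage) == 0 &&
	    atomic_exchange(&m->kmem_dead, false))
		css_put_model(m);
}

/* Models memcg_uncharge_kmem(): last uncharge of a dead memcg drops the pin. */
static void uncharge_model(struct fake_memcg *m, long size)
{
	if (atomic_fetch_sub(&m->kmem_usage, size) > size)
		return;		/* charges remain, nothing else to do */

	/* test_and_clear ensures only one path ever does the final put */
	if (atomic_exchange(&m->kmem_dead, false))
		css_put_model(m);
}

int main(void)
{
	struct fake_memcg m = { .css_refcnt = 1, .kmem_usage = 4096 };

	offline_model(&m);		/* charges pending, css stays pinned */
	uncharge_model(&m, 4096);	/* last uncharge releases the pin */
	css_put_model(&m);		/* drop the base reference */
	return 0;
}

The point the sketch makes is the pairing: the reference taken at offline time
is dropped either immediately (no outstanding kmem charges) or by whichever
uncharge is the last one to see the dead flag, never twice.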

[akpm@linux-foundation.org: tweak comment]
Signed-off-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 20f05310
@@ -406,6 +406,11 @@ static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
 
 static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
 {
+	/*
+	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
+	 * will call css_put() if it sees the memcg is dead.
+	 */
+	smp_wmb();
 	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
 		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
 }
@@ -3050,8 +3055,16 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 	if (res_counter_uncharge(&memcg->kmem, size))
 		return;
 
+	/*
+	 * Releases a reference taken in kmem_cgroup_css_offline in case
+	 * this last uncharge is racing with the offlining code or it is
+	 * outliving the memcg existence.
+	 *
+	 * The memory barrier imposed by test&clear is paired with the
+	 * explicit one in memcg_kmem_mark_dead().
+	 */
 	if (memcg_kmem_test_and_clear_dead(memcg))
-		mem_cgroup_put(memcg);
+		css_put(&memcg->css);
 }
 
 void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
@@ -5183,14 +5196,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 		 * starts accounting before all call sites are patched
 		 */
 		memcg_kmem_set_active(memcg);
-
-		/*
-		 * kmem charges can outlive the cgroup. In the case of slab
-		 * pages, for instance, a page contain objects from various
-		 * processes, so it is unfeasible to migrate them away. We
-		 * need to reference count the memcg because of that.
-		 */
-		mem_cgroup_get(memcg);
 	} else
 		ret = res_counter_set_limit(&memcg->kmem, val);
 out:
@@ -5223,12 +5228,10 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
 		goto out;
 
 	/*
-	 * destroy(), called if we fail, will issue static_key_slow_inc() and
-	 * mem_cgroup_put() if kmem is enabled. We have to either call them
-	 * unconditionally, or clear the KMEM_ACTIVE flag. I personally find
-	 * this more consistent, since it always leads to the same destroy path
+	 * __mem_cgroup_free() will issue static_key_slow_dec() because this
+	 * memcg is active already. If the later initialization fails then the
+	 * cgroup core triggers the cleanup so we do not have to do it here.
 	 */
-	mem_cgroup_get(memcg);
 	static_key_slow_inc(&memcg_kmem_enabled_key);
 
 	mutex_lock(&set_limit_mutex);
@@ -5913,23 +5916,43 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	return mem_cgroup_sockets_init(memcg, ss);
 }
 
-static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
+static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 	mem_cgroup_sockets_destroy(memcg);
+}
+
+static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
+{
+	if (!memcg_kmem_is_active(memcg))
+		return;
+
+	/*
+	 * kmem charges can outlive the cgroup. In the case of slab
+	 * pages, for instance, a page contain objects from various
+	 * processes. As we prevent from taking a reference for every
+	 * such allocation we have to be careful when doing uncharge
+	 * (see memcg_uncharge_kmem) and here during offlining.
+	 *
+	 * The idea is that that only the _last_ uncharge which sees
+	 * the dead memcg will drop the last reference. An additional
+	 * reference is taken here before the group is marked dead
+	 * which is then paired with css_put during uncharge resp. here.
+	 *
+	 * Although this might sound strange as this path is called from
	 * css_offline() when the reference might have dropped down to 0
+	 * and shouldn't be incremented anymore (css_tryget would fail)
+	 * we do not have other options because of the kmem allocations
+	 * lifetime.
+	 */
+	css_get(&memcg->css);
 
 	memcg_kmem_mark_dead(memcg);
 
 	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
 		return;
 
-	/*
-	 * Charges already down to 0, undo mem_cgroup_get() done in the charge
-	 * path here, being careful not to race with memcg_uncharge_kmem: it is
-	 * possible that the charges went down to 0 between mark_dead and the
-	 * res_counter read, so in that case, we don't need the put
-	 */
 	if (memcg_kmem_test_and_clear_dead(memcg))
-		mem_cgroup_put(memcg);
+		css_put(&memcg->css);
 }
 #else
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
@@ -5937,7 +5960,11 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	return 0;
 }
 
-static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
+static void memcg_destroy_kmem(struct mem_cgroup *memcg)
+{
+}
+
+static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
 {
 }
 #endif
@@ -6370,6 +6397,8 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+	kmem_cgroup_css_offline(memcg);
+
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
 	mem_cgroup_destroy_all_caches(memcg);
@@ -6379,9 +6408,8 @@ static void mem_cgroup_css_free(struct cgroup *cont)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
-	kmem_cgroup_destroy(memcg);
+	memcg_destroy_kmem(memcg);
 	__mem_cgroup_free(memcg);
-	mem_cgroup_put(memcg);
 }
 
 #ifdef CONFIG_MMU