Commit f0a3a24b authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: rework non-root kmem_cache lifecycle management

Currently each charged slab page holds a reference to the memory cgroup to
which it's charged.  Kmem_caches are held by the memcg and are released only
together with the memory cgroup.  This means that no kmem_cache can be
released as long as at least one reference to the memcg exists, which is far
from optimal.

Let's rework it in a way that allows releasing individual kmem_caches as
soon as the cgroup is offline, the kmem_cache is empty and there are no
pending allocations.

To make this possible, let's introduce a new percpu refcounter for non-root
kmem_caches.  The counter is initialized in percpu mode and switched to
atomic mode during kmem_cache deactivation.  The counter is bumped for every
charged page and for every allocation in flight, so the kmem_cache can't be
released until all outstanding allocations complete.
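
To make the scheme concrete, here is a minimal sketch of this refcounting
pattern.  The pcpu_cache structure and the pcpu_cache_*() helpers are
made-up names used only for illustration; the percpu_ref_*() calls are the
real <linux/percpu-refcount.h> API that the patch wires into
memcg_params.refcnt (see the diff below).

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

/* Hypothetical stand-in for a non-root kmem_cache and its memcg_params. */
struct pcpu_cache {
	struct percpu_ref refcnt;
};

/* Release callback: runs once the counter finally drops to zero. */
static void pcpu_cache_release(struct percpu_ref *ref)
{
	/* In the patch this schedules an asynchronous shutdown (next sketch). */
}

static int pcpu_cache_init(struct pcpu_cache *c)
{
	/* Starts in percpu mode: gets and puts are cheap per-cpu operations. */
	return percpu_ref_init(&c->refcnt, pcpu_cache_release, 0, GFP_KERNEL);
}

/* One reference per charged page, as memcg_charge_slab() does. */
static void pcpu_cache_charge_page(struct pcpu_cache *c, unsigned int order)
{
	percpu_ref_get_many(&c->refcnt, 1 << order);
}

static void pcpu_cache_uncharge_page(struct pcpu_cache *c, unsigned int order)
{
	percpu_ref_put_many(&c->refcnt, 1 << order);
}

/* One reference per allocation in flight, as memcg_kmem_get_cache() does. */
static bool pcpu_cache_get(struct pcpu_cache *c)
{
	return percpu_ref_tryget(&c->refcnt);
}

static void pcpu_cache_put(struct pcpu_cache *c)
{
	percpu_ref_put(&c->refcnt);
}

/* On memcg offlining: switch to atomic mode and drop the initial reference. */
static void pcpu_cache_deactivate(struct pcpu_cache *c)
{
	percpu_ref_kill(&c->refcnt);
}

In the patch itself these operations live in memcg_charge_slab() /
memcg_uncharge_slab() and memcg_kmem_get_cache() / memcg_kmem_put_cache(),
and the kill happens in kmemcg_cache_deactivate_after_rcu().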

To shut down inactive empty kmem_caches, let's reuse the workqueue
previously used for kmem_cache deactivation: once the reference counter
reaches 0, an asynchronous kmem_cache release is scheduled.
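
Because the final percpu_ref put (and hence the release callback) can happen
from a context that can't sleep, the actual shutdown has to be deferred.
Below is a minimal sketch of that deferral, again with the made-up
pcpu_cache names and using the generic system_wq instead of the dedicated
memcg_kmem_cache_wq the patch uses; kmemcg_cache_shutdown() in the diff
below follows the same shape.

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in mirroring memcg_params: a refcount plus a work item. */
struct pcpu_cache {
	struct percpu_ref refcnt;
	struct work_struct shutdown_work;
};

static void pcpu_cache_shutdown_workfn(struct work_struct *work)
{
	struct pcpu_cache *c = container_of(work, struct pcpu_cache,
					    shutdown_work);

	/*
	 * Process context: safe to take slab_mutex-style locks and do the
	 * real shutdown.  Free the percpu counters before the object itself.
	 */
	percpu_ref_exit(&c->refcnt);
	kfree(c);
}

/* percpu_ref release callback: may run in atomic context, so only queue work. */
static void pcpu_cache_shutdown(struct percpu_ref *ref)
{
	struct pcpu_cache *c = container_of(ref, struct pcpu_cache, refcnt);

	INIT_WORK(&c->shutdown_work, pcpu_cache_shutdown_workfn);
	queue_work(system_wq, &c->shutdown_work);
}

The real kmemcg_cache_shutdown() additionally takes memcg_kmem_wq_lock and
skips queuing when the root cache is already dying, so that no new release
work races with a global kmem_cache teardown.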

* I used the following simple approach to test the performance
(stolen from another patchset by T. Harding):

    for i in $(seq 10); do
            time find / -name fname-no-exist
            echo 2 > /proc/sys/vm/drop_caches
    done

Results:

        orig		patched

real	0m1.455s	real	0m1.355s
user	0m0.206s	user	0m0.219s
sys	0m0.855s	sys	0m0.807s

real	0m1.487s	real	0m1.699s
user	0m0.221s	user	0m0.256s
sys	0m0.806s	sys	0m0.948s

real	0m1.515s	real	0m1.505s
user	0m0.183s	user	0m0.215s
sys	0m0.876s	sys	0m0.858s

real	0m1.291s	real	0m1.380s
user	0m0.193s	user	0m0.198s
sys	0m0.843s	sys	0m0.786s

real	0m1.364s	real	0m1.374s
user	0m0.180s	user	0m0.182s
sys	0m0.868s	sys	0m0.806s

real	0m1.352s	real	0m1.312s
user	0m0.201s	user	0m0.212s
sys	0m0.820s	sys	0m0.761s

real	0m1.302s	real	0m1.349s
user	0m0.205s	user	0m0.203s
sys	0m0.803s	sys	0m0.792s

real	0m1.334s	real	0m1.301s
user	0m0.194s	user	0m0.201s
sys	0m0.806s	sys	0m0.779s

real	0m1.426s	real	0m1.434s
user	0m0.216s	user	0m0.181s
sys	0m0.824s	sys	0m0.864s

real	0m1.350s	real	0m1.295s
user	0m0.200s	user	0m0.190s
sys	0m0.842s	sys	0m0.811s

So it looks like the difference is not noticeable in this test.

[cai@lca.pw: fix an use-after-free in kmemcg_workfn()]
  Link: http://lkml.kernel.org/r/1560977573-10715-1-git-send-email-cai@lca.pw
Link: http://lkml.kernel.org/r/20190611231813.3148843-9-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Qian Cai <cai@lca.pw>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 63b02ef7
@@ -16,6 +16,7 @@
 #include <linux/overflow.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/percpu-refcount.h>

 /*

@@ -152,7 +153,6 @@ int kmem_cache_shrink(struct kmem_cache *);
 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
-void memcg_destroy_kmem_caches(struct mem_cgroup *);

 /*
  * Please use this macro to create slab caches. Simply specify the

@@ -642,6 +642,7 @@ struct memcg_cache_params {
 			struct mem_cgroup *memcg;
 			struct list_head children_node;
 			struct list_head kmem_caches_node;
+			struct percpu_ref refcnt;

 			void (*work_fn)(struct kmem_cache *);
 			union {
@@ -2667,12 +2667,13 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 {
 	struct memcg_kmem_cache_create_work *cw;

+	if (!css_tryget_online(&memcg->css))
+		return;
+
 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
 	if (!cw)
 		return;

-	css_get(&memcg->css);
-
 	cw->memcg = memcg;
 	cw->cachep = cachep;
 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

@@ -2707,6 +2708,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
+	struct memcg_cache_array *arr;
 	int kmemcg_id;

 	VM_BUG_ON(!is_root_cache(cachep));

@@ -2714,14 +2716,28 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 	if (memcg_kmem_bypass())
 		return cachep;

-	memcg = get_mem_cgroup_from_current();
+	rcu_read_lock();
+
+	if (unlikely(current->active_memcg))
+		memcg = current->active_memcg;
+	else
+		memcg = mem_cgroup_from_task(current);
+
+	if (!memcg || memcg == root_mem_cgroup)
+		goto out_unlock;
+
 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
 	if (kmemcg_id < 0)
-		goto out;
+		goto out_unlock;

-	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
-	if (likely(memcg_cachep))
-		return memcg_cachep;
+	arr = rcu_dereference(cachep->memcg_params.memcg_caches);
+
+	/*
+	 * Make sure we will access the up-to-date value. The code updating
+	 * memcg_caches issues a write barrier to match the data dependency
+	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
+	 */
+	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);

 	/*
 	 * If we are in a safe context (can wait, and not in interrupt

@@ -2734,10 +2750,20 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 	 * memcg_create_kmem_cache, this means no further allocation
 	 * could happen with the slab_mutex held. So it's better to
 	 * defer everything.
-	 */
+	 *
+	 * If the memcg is dying or memcg_cache is about to be released,
+	 * don't bother creating new kmem_caches. Because memcg_cachep
+	 * is ZEROed as the fist step of kmem offlining, we don't need
+	 * percpu_ref_tryget_live() here. css_tryget_online() check in
+	 * memcg_schedule_kmem_cache_create() will prevent us from
+	 * creation of a new kmem_cache.
+	 */
 	if (unlikely(!memcg_cachep))
 		memcg_schedule_kmem_cache_create(memcg, cachep);
-out:
-	css_put(&memcg->css);
+	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
+		cachep = memcg_cachep;
+
+out_unlock:
+	rcu_read_unlock();
 	return cachep;
 }

@@ -2748,7 +2774,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 	if (!is_root_cache(cachep))
-		css_put(&cachep->memcg_params.memcg->css);
+		percpu_ref_put(&cachep->memcg_params.refcnt);
 }

 /**

@@ -3295,7 +3321,7 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
 	memcg_offline_kmem(memcg);

 	if (memcg->kmem_state == KMEM_ALLOCATED) {
-		memcg_destroy_kmem_caches(memcg);
+		WARN_ON(!list_empty(&memcg->kmem_caches));
 		static_branch_dec(&memcg_kmem_enabled_key);
 		WARN_ON(page_counter_read(&memcg->kmem));
 	}
@@ -248,31 +248,6 @@ static inline const char *cache_name(struct kmem_cache *s)
 	return s->name;
 }

-/*
- * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away by either
- * taking a css reference to the owner cgroup, or holding the slab_mutex.
- */
-static inline struct kmem_cache *
-cache_from_memcg_idx(struct kmem_cache *s, int idx)
-{
-	struct kmem_cache *cachep;
-	struct memcg_cache_array *arr;
-
-	rcu_read_lock();
-	arr = rcu_dereference(s->memcg_params.memcg_caches);
-
-	/*
-	 * Make sure we will access the up-to-date value. The code updating
-	 * memcg_caches issues a write barrier to match this (see
-	 * memcg_create_kmem_cache()).
-	 */
-	cachep = READ_ONCE(arr->entries[idx]);
-	rcu_read_unlock();
-
-	return cachep;
-}
-
 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 {
 	if (is_root_cache(s))

@@ -284,14 +259,25 @@ static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
 {
+	int ret;
+
 	if (is_root_cache(s))
 		return 0;
-	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
+
+	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
+	if (ret)
+		return ret;
+
+	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+
+	return 0;
 }

 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
 {
+	if (!is_root_cache(s))
+		percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
 	memcg_kmem_uncharge(page, order);
 }

@@ -323,12 +309,6 @@ static inline const char *cache_name(struct kmem_cache *s)
 	return s->name;
 }

-static inline struct kmem_cache *
-cache_from_memcg_idx(struct kmem_cache *s, int idx)
-{
-	return NULL;
-}
-
 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 {
 	return s;
@@ -132,6 +132,8 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 LIST_HEAD(slab_root_caches);
 static DEFINE_SPINLOCK(memcg_kmem_wq_lock);

+static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
+
 void slab_init_memcg_params(struct kmem_cache *s)
 {
 	s->memcg_params.root_cache = NULL;

@@ -146,6 +148,12 @@ static int init_memcg_params(struct kmem_cache *s,
 	struct memcg_cache_array *arr;

 	if (root_cache) {
+		int ret = percpu_ref_init(&s->memcg_params.refcnt,
+					  kmemcg_cache_shutdown,
+					  0, GFP_KERNEL);
+		if (ret)
+			return ret;
+
 		s->memcg_params.root_cache = root_cache;
 		INIT_LIST_HEAD(&s->memcg_params.children_node);
 		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);

@@ -171,6 +179,8 @@ static void destroy_memcg_params(struct kmem_cache *s)
 {
 	if (is_root_cache(s))
 		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+	else
+		percpu_ref_exit(&s->memcg_params.refcnt);
 }

 static void free_memcg_params(struct rcu_head *rcu)

@@ -226,6 +236,7 @@ void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
 	if (is_root_cache(s)) {
 		list_add(&s->root_caches_node, &slab_root_caches);
 	} else {
+		css_get(&memcg->css);
 		s->memcg_params.memcg = memcg;
 		list_add(&s->memcg_params.children_node,
			 &s->memcg_params.root_cache->memcg_params.children);

@@ -241,6 +252,7 @@ static void memcg_unlink_cache(struct kmem_cache *s)
 	} else {
 		list_del(&s->memcg_params.children_node);
 		list_del(&s->memcg_params.kmem_caches_node);
+		css_put(&s->memcg_params.memcg->css);
 	}
 }
 #else

@@ -678,7 +690,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	}

 	/*
-	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
+	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */

@@ -701,16 +713,11 @@ static void kmemcg_workfn(struct work_struct *work)
 	get_online_mems();

 	mutex_lock(&slab_mutex);
-
 	s->memcg_params.work_fn(s);
-
 	mutex_unlock(&slab_mutex);

 	put_online_mems();
 	put_online_cpus();
-
-	/* done, put the ref from kmemcg_cache_deactivate() */
-	css_put(&s->memcg_params.memcg->css);
 }

 static void kmemcg_rcufn(struct rcu_head *head)

@@ -727,10 +734,38 @@ static void kmemcg_rcufn(struct rcu_head *head)
 		queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
 }

+static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
+{
+	WARN_ON(shutdown_cache(s));
+}
+
+static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
+{
+	struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
+					    memcg_params.refcnt);
+	unsigned long flags;
+
+	spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
+	if (s->memcg_params.root_cache->memcg_params.dying)
+		goto unlock;
+
+	s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
+	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
+	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
+
+unlock:
+	spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
+}
+
+static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
+{
+	__kmemcg_cache_deactivate_after_rcu(s);
+	percpu_ref_kill(&s->memcg_params.refcnt);
+}
+
 static void kmemcg_cache_deactivate(struct kmem_cache *s)
 {
-	if (WARN_ON_ONCE(is_root_cache(s)) ||
-	    WARN_ON_ONCE(s->memcg_params.work_fn))
+	if (WARN_ON_ONCE(is_root_cache(s)))
 		return;

 	__kmemcg_cache_deactivate(s);

@@ -744,10 +779,7 @@ static void kmemcg_cache_deactivate(struct kmem_cache *s)
 	if (s->memcg_params.root_cache->memcg_params.dying)
 		goto unlock;

-	/* pin memcg so that @s doesn't get destroyed in the middle */
-	css_get(&s->memcg_params.memcg->css);
-
-	s->memcg_params.work_fn = __kmemcg_cache_deactivate_after_rcu;
+	s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
 	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
 unlock:
 	spin_unlock_irq(&memcg_kmem_wq_lock);

@@ -781,28 +813,6 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 	put_online_cpus();
 }

-void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
-{
-	struct kmem_cache *s, *s2;
-
-	get_online_cpus();
-	get_online_mems();
-
-	mutex_lock(&slab_mutex);
-	list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
-				 memcg_params.kmem_caches_node) {
-		/*
-		 * The cgroup is about to be freed and therefore has no charges
-		 * left. Hence, all its caches must be empty by now.
-		 */
-		BUG_ON(shutdown_cache(s));
-	}
-	mutex_unlock(&slab_mutex);
-	put_online_mems();
-	put_online_cpus();
-}
-
 static int shutdown_memcg_caches(struct kmem_cache *s)
 {
 	struct memcg_cache_array *arr;