Commit 272911a4 authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: remove memcg_kmem_get_cache()

The memcg_kmem_get_cache() function became really trivial, so let's just
inline it into the single call point: memcg_slab_pre_alloc_hook().

It will make the code less bulky and can also help the compiler generate
better code.
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-15-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d797b7d0
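
For context, here is roughly what the call site looks like after this commit, reconstructed from the mm/slab.h hunk below. The elided parameters and the obj_cgroup tail are assumptions for illustration, not part of this diff:

	/*
	 * Sketch only: reconstructed from the mm/slab.h hunk in this commit.
	 * The elided parameters and trailing obj_cgroup logic are assumed.
	 */
	static inline struct kmem_cache *
	memcg_slab_pre_alloc_hook(struct kmem_cache *s /* , ... */)
	{
		struct kmem_cache *cachep;

		if (memcg_kmem_bypass())
			return s;

		/* Formerly the body of memcg_kmem_get_cache(), now inlined. */
		cachep = READ_ONCE(s->memcg_params.memcg_cache);
		if (unlikely(!cachep)) {
			/* Schedule asynchronous creation of the memcg cache
			 * and fall back to the root cache this time. */
			queue_work(system_wq, &s->memcg_params.work);
			return s;
		}

		/* ... obj_cgroup lookup and charging continue as before ... */
		return cachep;
	}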
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1403,8 +1403,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-
 #ifdef CONFIG_MEMCG_KMEM
 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
 			unsigned int nr_pages);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -393,7 +393,7 @@ void memcg_put_cache_ids(void)
 /*
  * A lot of the calls to the cache allocation functions are expected to be
- * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
+ * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  * conditional to this static branch, we'll have to allow modules that does
  * kmem_cache_alloc and the such to see this symbol as well
  */
@@ -2900,29 +2900,6 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
-/**
- * memcg_kmem_get_cache: select memcg or root cache for allocation
- * @cachep: the original global kmem cache
- *
- * Return the kmem_cache we're supposed to use for a slab allocation.
- *
- * If the cache does not exist yet, if we are the first user of it, we
- * create it asynchronously in a workqueue and let the current allocation
- * go through with the original cache.
- */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
-{
-	struct kmem_cache *memcg_cachep;
-
-	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
-	if (unlikely(!memcg_cachep)) {
-		queue_work(system_wq, &cachep->memcg_params.work);
-		return cachep;
-	}
-
-	return memcg_cachep;
-}
-
 /**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -365,9 +365,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (memcg_kmem_bypass())
 		return s;
 
-	cachep = memcg_kmem_get_cache(s);
-	if (is_root_cache(cachep))
+	cachep = READ_ONCE(s->memcg_params.memcg_cache);
+	if (unlikely(!cachep)) {
+		/*
+		 * If memcg cache does not exist yet, we schedule it's
+		 * asynchronous creation and let the current allocation
+		 * go through with the root cache.
+		 */
+		queue_work(system_wq, &s->memcg_params.work);
 		return s;
+	}
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -570,7 +570,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache)
 	}
 
 	/*
-	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
+	 * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a
 	 * barrier here to ensure nobody will see the kmem_cache partially
 	 * initialized.
 	 */
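
The barrier mentioned in this last hunk pairs with the READ_ONCE() load in memcg_slab_pre_alloc_hook(): the creator must finish initializing the kmem_cache before publishing the pointer, so lockless readers never see it half-built. As a hedged userspace analogy of that publish/read pattern (C11 atomics with acquire/release; not the kernel's actual primitives or types):

	#include <stdatomic.h>
	#include <stddef.h>

	struct cache { int fully_initialized; };

	static struct cache * _Atomic memcg_cache_slot;

	/* Writer: complete all initialization first, then publish the
	 * pointer with release semantics so readers can't observe a
	 * partially initialized cache. */
	static void publish_cache(struct cache *c)
	{
		c->fully_initialized = 1;
		atomic_store_explicit(&memcg_cache_slot, c,
				      memory_order_release);
	}

	/* Lockless reader: the acquire load pairs with the release store
	 * above; a non-NULL result is always fully initialized. */
	static struct cache *lookup_cache(void)
	{
		return atomic_load_explicit(&memcg_cache_slot,
					    memory_order_acquire);
	}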