Commit b4c2b231 authored by Kirill Tkhai, committed by Linus Torvalds

mm: assign id to every memcg-aware shrinker

Introduce shrinker::id number, which is used to enumerate memcg-aware
shrinkers.  The numbers start from 0, and the code tries to keep them
as small as possible.

This will be used to represent memcg-aware shrinkers in the memcg
shrinkers map.

Since all memcg-aware shrinkers are based on list_lru, which is
per-memcg only in the case of CONFIG_MEMCG_KMEM, the new functionality
is placed under this config option.
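
For illustration only (not part of this patch): a minimal sketch of how a
memcg-aware shrinker picks up an id once this change is in place.  All
names prefixed demo_ are hypothetical; register_shrinker(), DEFAULT_SEEKS
and the SHRINKER_MEMCG_AWARE flag are existing kernel API.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/shrinker.h>

static unsigned long demo_count_objects(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return 0;			/* nothing cached in this sketch */
}

static unsigned long demo_scan_objects(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return SHRINK_STOP;		/* nothing to reclaim */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count_objects,
	.scan_objects	= demo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,	/* opts into id assignment */
};

static int __init demo_init(void)
{
	/*
	 * register_shrinker() runs prealloc_shrinker(), which with this
	 * patch also calls prealloc_memcg_shrinker() and stores an
	 * IDR-allocated number in demo_shrinker.id (CONFIG_MEMCG_KMEM only).
	 */
	return register_shrinker(&demo_shrinker);
}
module_init(demo_init);
MODULE_LICENSE("GPL");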

[ktkhai@virtuozzo.com: v9]
  Link: http://lkml.kernel.org/r/153112546435.4097.10607140323811756557.stgit@localhost.localdomain
Link: http://lkml.kernel.org/r/153063054586.1818.6041047871606697364.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Tested-by: Shakeel Butt <shakeelb@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Li RongQing <lirongqing@baidu.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matthias Kaehlcke <mka@chromium.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Sahitya Tummala <stummala@codeaurora.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 84c07d11
include/linux/shrinker.h
@@ -66,6 +66,10 @@ struct shrinker {
 	/* These are for internal use */
 	struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+	/* ID in shrinker_idr */
+	int id;
+#endif
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
 };
...
mm/vmscan.c
@@ -169,6 +169,50 @@ unsigned long vm_total_pages;
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_MEMCG_KMEM
+static DEFINE_IDR(shrinker_idr);
+static int shrinker_nr_max;
+
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id, ret = -ENOMEM;
+
+	down_write(&shrinker_rwsem);
+	/* This may call shrinker, so it must use down_read_trylock() */
+	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
+	if (id < 0)
+		goto unlock;
+
+	if (id >= shrinker_nr_max)
+		shrinker_nr_max = id + 1;
+	shrinker->id = id;
+	ret = 0;
+unlock:
+	up_write(&shrinker_rwsem);
+	return ret;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id = shrinker->id;
+
+	BUG_ON(id < 0);
+
+	down_write(&shrinker_rwsem);
+	idr_remove(&shrinker_idr, id);
+	up_write(&shrinker_rwsem);
+}
+#else /* CONFIG_MEMCG_KMEM */
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+	return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
 #ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
@@ -313,11 +357,28 @@ int prealloc_shrinker(struct shrinker *shrinker)
 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
 	if (!shrinker->nr_deferred)
 		return -ENOMEM;
+
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+		if (prealloc_memcg_shrinker(shrinker))
+			goto free_deferred;
+	}
+
 	return 0;
+
+free_deferred:
+	kfree(shrinker->nr_deferred);
+	shrinker->nr_deferred = NULL;
+	return -ENOMEM;
 }
 
 void free_prealloced_shrinker(struct shrinker *shrinker)
 {
+	if (!shrinker->nr_deferred)
+		return;
+
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		unregister_memcg_shrinker(shrinker);
+
 	kfree(shrinker->nr_deferred);
 	shrinker->nr_deferred = NULL;
 }
@@ -347,6 +408,8 @@ void unregister_shrinker(struct shrinker *shrinker)
 {
 	if (!shrinker->nr_deferred)
 		return;
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		unregister_memcg_shrinker(shrinker);
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
...
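
As a conceptual follow-up to the commit message's note that the id will be
used to represent memcg-aware shrinkers in a memcg shrinkers map: the sketch
below is hypothetical (the real map arrives in a follow-up patch of this
series) and only illustrates why a small, densely packed id matters.  With
ids bounded by shrinker_nr_max, a per-memcg structure can track "this
shrinker may have objects" in a bitmap indexed directly by shrinker->id.
The struct and helpers are invented for illustration.

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical per-memcg map: one bit per shrinker id. */
struct demo_shrinker_map {
	unsigned long map[];		/* sized from shrinker_nr_max */
};

/* Mark shrinker 'id' as potentially having reclaimable objects. */
static inline void demo_set_shrinker_bit(struct demo_shrinker_map *m, int id)
{
	set_bit(id, m->map);
}

/* Let memcg reclaim skip shrinkers whose bit is clear. */
static inline bool demo_shrinker_has_objects(struct demo_shrinker_map *m, int id)
{
	return test_bit(id, m->map);
}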