Commit 84c07d11 authored by Kirill Tkhai, committed by Linus Torvalds

mm: introduce CONFIG_MEMCG_KMEM as combination of CONFIG_MEMCG && !CONFIG_SLOB

Introduce new config option, which is used to replace repeating
CONFIG_MEMCG && !CONFIG_SLOB pattern.  Next patches add a little more
memcg+kmem related code, so let's keep the defines more clearly.

Link: http://lkml.kernel.org/r/153063053670.1818.15013136946600481138.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Tested-by: Shakeel Butt <shakeelb@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Li RongQing <lirongqing@baidu.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matthias Kaehlcke <mka@chromium.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Sahitya Tummala <stummala@codeaurora.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e0295238
...@@ -42,7 +42,7 @@ struct list_lru_node { ...@@ -42,7 +42,7 @@ struct list_lru_node {
spinlock_t lock; spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */ /* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru; struct list_lru_one lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg __rcu *memcg_lrus; struct list_lru_memcg __rcu *memcg_lrus;
#endif #endif
...@@ -51,7 +51,7 @@ struct list_lru_node { ...@@ -51,7 +51,7 @@ struct list_lru_node {
struct list_lru { struct list_lru {
struct list_lru_node *node; struct list_lru_node *node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
struct list_head list; struct list_head list;
#endif #endif
}; };
......
...@@ -271,7 +271,7 @@ struct mem_cgroup { ...@@ -271,7 +271,7 @@ struct mem_cgroup {
bool tcpmem_active; bool tcpmem_active;
int tcpmem_pressure; int tcpmem_pressure;
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
/* Index in the kmem_cache->memcg_params.memcg_caches array */ /* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id; int kmemcg_id;
enum memcg_kmem_state kmem_state; enum memcg_kmem_state kmem_state;
...@@ -1231,7 +1231,7 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, ...@@ -1231,7 +1231,7 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order); void memcg_kmem_uncharge(struct page *page, int order);
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key; extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq; extern struct workqueue_struct *memcg_kmem_cache_wq;
...@@ -1284,6 +1284,6 @@ static inline void memcg_put_cache_ids(void) ...@@ -1284,6 +1284,6 @@ static inline void memcg_put_cache_ids(void)
{ {
} }
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */ #endif /* _LINUX_MEMCONTROL_H */
...@@ -723,7 +723,7 @@ struct task_struct { ...@@ -723,7 +723,7 @@ struct task_struct {
#endif #endif
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
unsigned in_user_fault:1; unsigned in_user_fault:1;
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1; unsigned memcg_kmem_skip_account:1;
#endif #endif
#endif #endif
......
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
# define SLAB_FAILSLAB 0 # define SLAB_FAILSLAB 0
#endif #endif
/* Account to memcg */ /* Account to memcg */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U) # define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else #else
# define SLAB_ACCOUNT 0 # define SLAB_ACCOUNT 0
......
...@@ -708,6 +708,11 @@ config MEMCG_SWAP_ENABLED ...@@ -708,6 +708,11 @@ config MEMCG_SWAP_ENABLED
select this option (if, for some reason, they need to disable it select this option (if, for some reason, they need to disable it
then swapaccount=0 does the trick). then swapaccount=0 does the trick).
config MEMCG_KMEM
bool
depends on MEMCG && !SLOB
default y
config BLK_CGROUP config BLK_CGROUP
bool "IO controller" bool "IO controller"
depends on BLOCK depends on BLOCK
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/memcontrol.h> #include <linux/memcontrol.h>
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus); static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex); static DEFINE_MUTEX(list_lrus_mutex);
...@@ -103,7 +103,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) ...@@ -103,7 +103,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{ {
return &nlru->lru; return &nlru->lru;
} }
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
bool list_lru_add(struct list_lru *lru, struct list_head *item) bool list_lru_add(struct list_lru *lru, struct list_head *item)
{ {
...@@ -284,7 +284,7 @@ static void init_one_lru(struct list_lru_one *l) ...@@ -284,7 +284,7 @@ static void init_one_lru(struct list_lru_one *l)
l->nr_items = 0; l->nr_items = 0;
} }
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
int begin, int end) int begin, int end)
{ {
...@@ -543,7 +543,7 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) ...@@ -543,7 +543,7 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
static void memcg_destroy_list_lru(struct list_lru *lru) static void memcg_destroy_list_lru(struct list_lru *lru)
{ {
} }
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
int __list_lru_init(struct list_lru *lru, bool memcg_aware, int __list_lru_init(struct list_lru *lru, bool memcg_aware,
struct lock_class_key *key) struct lock_class_key *key)
......
...@@ -251,7 +251,7 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) ...@@ -251,7 +251,7 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
return (memcg == root_mem_cgroup); return (memcg == root_mem_cgroup);
} }
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
/* /*
* This will be the memcg's index in each cache's ->memcg_params.memcg_caches. * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
* The main reason for not using cgroup id for this: * The main reason for not using cgroup id for this:
...@@ -305,7 +305,7 @@ EXPORT_SYMBOL(memcg_kmem_enabled_key); ...@@ -305,7 +305,7 @@ EXPORT_SYMBOL(memcg_kmem_enabled_key);
struct workqueue_struct *memcg_kmem_cache_wq; struct workqueue_struct *memcg_kmem_cache_wq;
#endif /* !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
/** /**
* mem_cgroup_css_from_page - css of the memcg associated with a page * mem_cgroup_css_from_page - css of the memcg associated with a page
...@@ -2215,7 +2215,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, ...@@ -2215,7 +2215,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
unlock_page_lru(page, isolated); unlock_page_lru(page, isolated);
} }
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_id(void) static int memcg_alloc_cache_id(void)
{ {
int id, size; int id, size;
...@@ -2480,7 +2480,7 @@ void memcg_kmem_uncharge(struct page *page, int order) ...@@ -2480,7 +2480,7 @@ void memcg_kmem_uncharge(struct page *page, int order)
css_put_many(&memcg->css, nr_pages); css_put_many(&memcg->css, nr_pages);
} }
#endif /* !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
...@@ -2875,7 +2875,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, ...@@ -2875,7 +2875,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
} }
} }
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg) static int memcg_online_kmem(struct mem_cgroup *memcg)
{ {
int memcg_id; int memcg_id;
...@@ -2975,7 +2975,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) ...@@ -2975,7 +2975,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
static void memcg_free_kmem(struct mem_cgroup *memcg) static void memcg_free_kmem(struct mem_cgroup *memcg)
{ {
} }
#endif /* !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
static int memcg_update_kmem_max(struct mem_cgroup *memcg, static int memcg_update_kmem_max(struct mem_cgroup *memcg,
unsigned long max) unsigned long max)
...@@ -4279,7 +4279,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) ...@@ -4279,7 +4279,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
INIT_LIST_HEAD(&memcg->event_list); INIT_LIST_HEAD(&memcg->event_list);
spin_lock_init(&memcg->event_list_lock); spin_lock_init(&memcg->event_list_lock);
memcg->socket_pressure = jiffies; memcg->socket_pressure = jiffies;
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
memcg->kmemcg_id = -1; memcg->kmemcg_id = -1;
#endif #endif
#ifdef CONFIG_CGROUP_WRITEBACK #ifdef CONFIG_CGROUP_WRITEBACK
...@@ -6119,7 +6119,7 @@ static int __init mem_cgroup_init(void) ...@@ -6119,7 +6119,7 @@ static int __init mem_cgroup_init(void)
{ {
int cpu, node; int cpu, node;
#ifndef CONFIG_SLOB #ifdef CONFIG_MEMCG_KMEM
/* /*
* Kmem cache creation is mostly done with the slab_mutex held, * Kmem cache creation is mostly done with the slab_mutex held,
* so use a workqueue with limited concurrency to avoid stalling * so use a workqueue with limited concurrency to avoid stalling
......
...@@ -203,7 +203,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, ...@@ -203,7 +203,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
/* List of all root caches. */ /* List of all root caches. */
extern struct list_head slab_root_caches; extern struct list_head slab_root_caches;
...@@ -296,7 +296,7 @@ extern void memcg_link_cache(struct kmem_cache *s); ...@@ -296,7 +296,7 @@ extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s, extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
void (*deact_fn)(struct kmem_cache *)); void (*deact_fn)(struct kmem_cache *));
#else /* CONFIG_MEMCG && !CONFIG_SLOB */ #else /* CONFIG_MEMCG_KMEM */
/* If !memcg, all caches are root. */ /* If !memcg, all caches are root. */
#define slab_root_caches slab_caches #define slab_root_caches slab_caches
...@@ -351,7 +351,7 @@ static inline void memcg_link_cache(struct kmem_cache *s) ...@@ -351,7 +351,7 @@ static inline void memcg_link_cache(struct kmem_cache *s)
{ {
} }
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{ {
......
...@@ -127,7 +127,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, ...@@ -127,7 +127,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
return i; return i;
} }
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
LIST_HEAD(slab_root_caches); LIST_HEAD(slab_root_caches);
...@@ -256,7 +256,7 @@ static inline void destroy_memcg_params(struct kmem_cache *s) ...@@ -256,7 +256,7 @@ static inline void destroy_memcg_params(struct kmem_cache *s)
static inline void memcg_unlink_cache(struct kmem_cache *s) static inline void memcg_unlink_cache(struct kmem_cache *s)
{ {
} }
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
/* /*
* Figure out what the alignment of the objects will be given a set of * Figure out what the alignment of the objects will be given a set of
...@@ -584,7 +584,7 @@ static int shutdown_cache(struct kmem_cache *s) ...@@ -584,7 +584,7 @@ static int shutdown_cache(struct kmem_cache *s)
return 0; return 0;
} }
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) #ifdef CONFIG_MEMCG_KMEM
/* /*
* memcg_create_kmem_cache - Create a cache for a memory cgroup. * memcg_create_kmem_cache - Create a cache for a memory cgroup.
* @memcg: The memory cgroup the new cache is for. * @memcg: The memory cgroup the new cache is for.
...@@ -861,7 +861,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) ...@@ -861,7 +861,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
static inline void flush_memcg_workqueue(struct kmem_cache *s) static inline void flush_memcg_workqueue(struct kmem_cache *s)
{ {
} }
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s) void slab_kmem_cache_release(struct kmem_cache *s)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment