Commit 9edad6ea authored by Johannes Weiner, committed by Linus Torvalds

mm: move page->mem_cgroup bad page handling into generic code

Now that the external page_cgroup data structure and its lookup is
gone, let the generic bad_page() check for page->mem_cgroup sanity.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5d1ea48b
...@@ -173,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, ...@@ -173,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
void mem_cgroup_split_huge_fixup(struct page *head); void mem_cgroup_split_huge_fixup(struct page *head);
#endif #endif
#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */ #else /* CONFIG_MEMCG */
struct mem_cgroup; struct mem_cgroup;
...@@ -346,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) ...@@ -346,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
} }
#endif /* CONFIG_MEMCG */ #endif /* CONFIG_MEMCG */
/*
 * No-op fallbacks used when memcg debugging is not built in
 * (either CONFIG_MEMCG or CONFIG_DEBUG_VM disabled); removed by
 * this commit along with the real definitions below.
 */
#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
/* Stub: reports no bad-page condition. */
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
return false;
}
/* Stub: prints nothing. */
static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif
enum { enum {
UNDER_LIMIT, UNDER_LIMIT,
SOFT_LIMIT, SOFT_LIMIT,
......
...@@ -983,18 +983,6 @@ config MEMCG ...@@ -983,18 +983,6 @@ config MEMCG
Provides a memory resource controller that manages both anonymous Provides a memory resource controller that manages both anonymous
memory and page cache. (See Documentation/cgroups/memory.txt) memory and page cache. (See Documentation/cgroups/memory.txt)
Note that setting this option increases fixed memory overhead
associated with each page of memory in the system. By this,
8(16)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory
usage tracking struct at boot. Total amount of this is printed out
at boot.
Only enable when you're ok with these trade offs and really
sure you need the memory resource controller. Even when you enable
this, you can set "cgroup_disable=memory" at your boot option to
disable memory resource controller and you can avoid overheads.
(and lose benefits of memory resource controller)
config MEMCG_SWAP config MEMCG_SWAP
bool "Memory Resource Controller Swap Extension" bool "Memory Resource Controller Swap Extension"
depends on MEMCG && SWAP depends on MEMCG && SWAP
......
...@@ -95,7 +95,10 @@ void dump_page_badflags(struct page *page, const char *reason, ...@@ -95,7 +95,10 @@ void dump_page_badflags(struct page *page, const char *reason,
dump_flags(page->flags & badflags, dump_flags(page->flags & badflags,
pageflag_names, ARRAY_SIZE(pageflag_names)); pageflag_names, ARRAY_SIZE(pageflag_names));
} }
mem_cgroup_print_bad_page(page); #ifdef CONFIG_MEMCG
if (page->mem_cgroup)
pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
#endif
} }
void dump_page(struct page *page, const char *reason) void dump_page(struct page *page, const char *reason)
......
...@@ -3157,21 +3157,6 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, ...@@ -3157,21 +3157,6 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
} }
#endif #endif
/*
 * Removed by this commit: the generic free_pages_check()/check_new_page()
 * paths in page_alloc.c now test page->mem_cgroup directly under
 * CONFIG_MEMCG (see the page_alloc.c hunks below).
 */
#ifdef CONFIG_DEBUG_VM
/* Returns true when the page still carries a memcg pointer, i.e. it is
 * still charged to a cgroup — a bad-page condition at free/alloc time. */
bool mem_cgroup_bad_page_check(struct page *page)
{
if (mem_cgroup_disabled())
return false;
return page->mem_cgroup != NULL;
}
/* Dumps the stale memcg pointer as part of bad-page diagnostics. */
void mem_cgroup_print_bad_page(struct page *page)
{
pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
}
#endif
static DEFINE_MUTEX(memcg_limit_mutex); static DEFINE_MUTEX(memcg_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
......
...@@ -640,8 +640,10 @@ static inline int free_pages_check(struct page *page) ...@@ -640,8 +640,10 @@ static inline int free_pages_check(struct page *page)
bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
bad_flags = PAGE_FLAGS_CHECK_AT_FREE; bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
} }
if (unlikely(mem_cgroup_bad_page_check(page))) #ifdef CONFIG_MEMCG
bad_reason = "cgroup check failed"; if (unlikely(page->mem_cgroup))
bad_reason = "page still charged to cgroup";
#endif
if (unlikely(bad_reason)) { if (unlikely(bad_reason)) {
bad_page(page, bad_reason, bad_flags); bad_page(page, bad_reason, bad_flags);
return 1; return 1;
...@@ -900,8 +902,10 @@ static inline int check_new_page(struct page *page) ...@@ -900,8 +902,10 @@ static inline int check_new_page(struct page *page)
bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
bad_flags = PAGE_FLAGS_CHECK_AT_PREP; bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
} }
if (unlikely(mem_cgroup_bad_page_check(page))) #ifdef CONFIG_MEMCG
bad_reason = "cgroup check failed"; if (unlikely(page->mem_cgroup))
bad_reason = "page still charged to cgroup";
#endif
if (unlikely(bad_reason)) { if (unlikely(bad_reason)) {
bad_page(page, bad_reason, bad_flags); bad_page(page, bad_reason, bad_flags);
return 1; return 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment