Commit f212ad7c authored by Daisuke Nishimura, committed by Linus Torvalds

memcg: add memcg sanity checks at allocating and freeing pages

Add checks at page allocation and free time for whether the page is still in
use (i.e. charged) from the memcg point of view.

These checks can be useful when debugging a problem; we did similar checks
before commit 52d4b9ac ("memcg: allocate all page_cgroup at boot").

The checks add some overhead to page allocation and freeing, so they are
enabled only when CONFIG_DEBUG_VM is set.
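
For illustration, here is a hypothetical buggy sequence of the kind these
checks catch (the memcg entry points are the real ones of this kernel; the
function itself is invented for this sketch):

	/* Hypothetical example, not from this patch: the page is charged
	 * but never uncharged before being freed.  With CONFIG_DEBUG_VM,
	 * free_pages_check() now sees PageCgroupUsed still set via
	 * mem_cgroup_bad_page_check() and calls bad_page(). */
	static void leak_a_charge(struct mm_struct *mm)
	{
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return;
		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
			goto out;
		/* BUG: missing mem_cgroup_uncharge_page(page) */
	out:
		__free_page(page);	/* DEBUG_VM reports the stale charge */
	}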
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent af4a6621
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -151,6 +151,10 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
 void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
 #endif
 
+#ifdef CONFIG_DEBUG_VM
+bool mem_cgroup_bad_page_check(struct page *page);
+void mem_cgroup_print_bad_page(struct page *page);
+#endif
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct mem_cgroup;
 
@@ -352,5 +356,18 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head,
 
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
+#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+static inline bool
+mem_cgroup_bad_page_check(struct page *page)
+{
+	return false;
+}
+
+static inline void
+mem_cgroup_print_bad_page(struct page *page)
+{
+}
+#endif
+
 #endif /* _LINUX_MEMCONTROL_H */
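
Note that the stub block above means callers need no #ifdef guards of their
own: whenever either CONFIG_CGROUP_MEM_RES_CTLR or CONFIG_DEBUG_VM is off,
mem_cgroup_bad_page_check() is a static inline returning false, so the
compiler folds the whole check away. A hypothetical caller, for illustration
only:

	/* With the debug checks configured out, the inline stub returns
	 * false and this reduces to a plain "return 0" at compile time. */
	static inline int page_has_stale_charge(struct page *page)
	{
		return mem_cgroup_bad_page_check(page) ? 1 : 0;
	}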
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3046,6 +3046,52 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_VM
+static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+{
+	struct page_cgroup *pc;
+
+	pc = lookup_page_cgroup(page);
+	if (likely(pc) && PageCgroupUsed(pc))
+		return pc;
+	return NULL;
+}
+
+bool mem_cgroup_bad_page_check(struct page *page)
+{
+	if (mem_cgroup_disabled())
+		return false;
+
+	return lookup_page_cgroup_used(page) != NULL;
+}
+
+void mem_cgroup_print_bad_page(struct page *page)
+{
+	struct page_cgroup *pc;
+
+	pc = lookup_page_cgroup_used(page);
+	if (pc) {
+		int ret = -1;
+		char *path;
+
+		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
+		       pc, pc->flags, pc->mem_cgroup);
+
+		path = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (path) {
+			rcu_read_lock();
+			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
+					  path, PATH_MAX);
+			rcu_read_unlock();
+		}
+
+		printk(KERN_CONT "(%s)\n",
+		       (ret < 0) ? "cannot get the path" : path);
+		kfree(path);
+	}
+}
+#endif
+
 static DEFINE_MUTEX(set_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
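
A note on the cgroup_path() idiom in mem_cgroup_print_bad_page() above: the
buffer is allocated with GFP_KERNEL before taking the RCU read lock, because
the allocation may sleep while the RCU section may not. The same pattern in
isolation (the helper name is invented for this sketch):

	/* Sketch only: resolve a cgroup to a path for a printk. */
	static void print_cgroup_path(struct cgroup *cgrp)
	{
		char *path = kmalloc(PATH_MAX, GFP_KERNEL);
		int ret = -1;

		if (path) {
			rcu_read_lock();
			ret = cgroup_path(cgrp, path, PATH_MAX);
			rcu_read_unlock();
		}
		printk(KERN_INFO "%s\n",
		       (ret < 0) ? "cannot get the path" : path);
		kfree(path);
	}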
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,7 @@
 #include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -565,7 +566,8 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(atomic_read(&page->_count) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
+		(mem_cgroup_bad_page_check(page)))) {
 		bad_page(page);
 		return 1;
 	}
@@ -754,7 +756,8 @@ static inline int check_new_page(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(atomic_read(&page->_count) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
+		(mem_cgroup_bad_page_check(page)))) {
 		bad_page(page);
 		return 1;
 	}
@@ -5684,4 +5687,5 @@ void dump_page(struct page *page)
 		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
+	mem_cgroup_print_bad_page(page);
 }