Commit 52d4b9ac authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: allocate all page_cgroup at boot

Allocate all page_cgroup structures at boot and remove the page_cgroup
pointer from struct page.  This patch adds the interface:

 struct page_cgroup *lookup_page_cgroup(struct page *page)

FLATMEM, DISCONTIGMEM, SPARSEMEM, and MEMORY_HOTPLUG are all supported.

Removing the page_cgroup pointer reduces memory usage by
 - 4 bytes per page on 32-bit,
 - 8 bytes per page on 64-bit,
when the memory controller is disabled at boot (even if it is configured in).

On a typical 8GB x86-32 server, this saves 8MB of NORMAL_ZONE memory.
On my x86-64 server with 48GB of memory, this saves 96MB of memory.
I think this reduction is worthwhile.
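
(For reference, the arithmetic behind those figures, assuming 4KB pages:
8GB / 4KB = 2,097,152 pages x 4 bytes = 8MB, and
48GB / 4KB = 12,582,912 pages x 8 bytes = 96MB.)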

By pre-allocating, the kmalloc/kfree calls in charge/uncharge are removed
(a sketch of the resulting lookup scheme follows this list). This means
  - we no longer need to worry about kmalloc failure
    (which can happen depending on the gfp_mask type);
  - we avoid the cost of calling kmalloc/kfree on every charge/uncharge;
  - we avoid allocating tons of small objects, which can fragment memory;
  - we know in advance how much memory this extra-LRU handling will use.
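
To make that concrete, here is a minimal userspace model of the technique
(a hedged sketch, not the kernel implementation; all model_* names are
invented for illustration): the descriptor table for a node's whole pfn
range is allocated once up front, and "charging" becomes a constant-time
lookup that cannot fail.

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for struct page_cgroup (illustrative only). */
struct model_page_cgroup {
	unsigned long flags;
	void *mem_cgroup;	/* owning group, NULL while unused */
};

static struct model_page_cgroup *table;	/* one entry per page frame */
static unsigned long start_pfn, nr_pages;

/* Analogue of alloc_node_page_cgroup(): allocate every entry once. */
static int model_init(unsigned long start, unsigned long count)
{
	start_pfn = start;
	nr_pages = count;
	table = calloc(count, sizeof(*table));
	return table ? 0 : -1;
}

/* Analogue of lookup_page_cgroup(): pure arithmetic, no allocation. */
static struct model_page_cgroup *model_lookup(unsigned long pfn)
{
	if (pfn < start_pfn || pfn >= start_pfn + nr_pages)
		return NULL;
	return &table[pfn - start_pfn];
}

int main(void)
{
	if (model_init(0x1000, 1024))
		return 1;
	/* "Charging" a page is now a lookup plus a store. */
	model_lookup(0x1234)->mem_cgroup = &table[0];	/* dummy group */
	printf("pfn 0x1234 -> group %p\n", model_lookup(0x1234)->mem_cgroup);
	free(table);
	return 0;
}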

I added printk messages:

	"allocated %ld bytes of page_cgroup"
	"please try cgroup_disable=memory option if you don't want"

which should be informative enough for users.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c05555b5

include/linux/memcontrol.h

@@ -27,9 +27,6 @@ struct mm_struct;
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0)
-extern struct page_cgroup *page_get_page_cgroup(struct page *page);
 extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
@@ -72,16 +69,8 @@ extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
 					int priority, enum lru_list lru);
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline void page_reset_bad_cgroup(struct page *page)
-{
-}
-static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
-{
-	return NULL;
-}
 static inline int mem_cgroup_charge(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {

include/linux/mm_types.h

@@ -94,9 +94,6 @@ struct page {
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-	unsigned long page_cgroup;
-#endif
 };

 /*

include/linux/mmzone.h

@@ -601,8 +601,11 @@ typedef struct pglist_data {
 	struct zone node_zones[MAX_NR_ZONES];
 	struct zonelist node_zonelists[MAX_ZONELISTS];
 	int nr_zones;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
 	struct page *node_mem_map;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	struct page_cgroup *node_page_cgroup;
+#endif
 #endif
 	struct bootmem_data *bdata;
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -931,6 +934,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #endif

 struct page;
+struct page_cgroup;
 struct mem_section {
 	/*
 	 * This is, logically, a pointer to an array of struct

@@ -948,6 +952,14 @@ struct mem_section {
 	/* See declaration of similar field in struct zone */
 	unsigned long *pageblock_flags;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	/*
+	 * With SPARSEMEM, pgdat doesn't carry a page_cgroup pointer;
+	 * the table hangs off the section instead. (See
+	 * memcontrol.h/page_cgroup.h about this.)
+	 */
+	struct page_cgroup *page_cgroup;
+	unsigned long pad;
+#endif
 };

 #ifdef CONFIG_SPARSEMEM_EXTREME

include/linux/page_cgroup.h (new file)

#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>
/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup is associated with every page descriptor and records
 * which cgroup the page belongs to. All page_cgroups are allocated at
 * boot or on memory hotplug, so the page_cgroup for a pfn always exists.
 */
struct page_cgroup {
unsigned long flags;
struct mem_cgroup *mem_cgroup;
struct page *page;
struct list_head lru; /* per cgroup LRU list */
};
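/*
 * Size note (approximate, on 64-bit): 8 (flags) + 8 + 8 (pointers) +
 * 16 (list_head) = 40 bytes per page frame, i.e. roughly 1% of RAM
 * with 4KB pages.
 */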
void __init pgdat_page_cgroup_init(struct pglist_data *pgdat);
void __init page_cgroup_init(void);
struct page_cgroup *lookup_page_cgroup(struct page *page);
enum {
/* flags for mem_cgroup */
PCG_LOCK, /* page cgroup is locked */
PCG_CACHE, /* charged as cache */
PCG_USED, /* this object is in use. */
/* flags for LRU placement */
PCG_ACTIVE, /* page is active in this cgroup */
PCG_FILE, /* page is file system backed */
	PCG_UNEVICTABLE, /* page is unevictable */
};
#define TESTPCGFLAG(uname, lname) \
static inline int PageCgroup##uname(struct page_cgroup *pc) \
{ return test_bit(PCG_##lname, &pc->flags); }
#define SETPCGFLAG(uname, lname) \
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
{ set_bit(PCG_##lname, &pc->flags); }
#define CLEARPCGFLAG(uname, lname) \
static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
{ clear_bit(PCG_##lname, &pc->flags); }
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
/* LRU management flags (from global-lru definition) */
TESTPCGFLAG(File, FILE)
SETPCGFLAG(File, FILE)
CLEARPCGFLAG(File, FILE)
TESTPCGFLAG(Active, ACTIVE)
SETPCGFLAG(Active, ACTIVE)
CLEARPCGFLAG(Active, ACTIVE)
TESTPCGFLAG(Unevictable, UNEVICTABLE)
SETPCGFLAG(Unevictable, UNEVICTABLE)
CLEARPCGFLAG(Unevictable, UNEVICTABLE)
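/*
 * For example, TESTPCGFLAG(Cache, CACHE) above expands to:
 *
 *	static inline int PageCgroupCache(struct page_cgroup *pc)
 *	{ return test_bit(PCG_CACHE, &pc->flags); }
 */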
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
return page_to_nid(pc->page);
}
static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
return page_zonenum(pc->page);
}
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_lock(PCG_LOCK, &pc->flags);
}
static inline int trylock_page_cgroup(struct page_cgroup *pc)
{
return bit_spin_trylock(PCG_LOCK, &pc->flags);
}
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
}
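/*
 * Typical pattern for callers (illustrative; the real call sites live in
 * mm/memcontrol.c):
 *
 *	lock_page_cgroup(pc);
 *	if (PageCgroupUsed(pc))
 *		... inspect or update pc ...
 *	unlock_page_cgroup(pc);
 */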
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;
static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}
static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
return NULL;
}
#endif
#endif

mm/Makefile

@@ -33,5 +33,4 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o

mm/memcontrol.c (this diff is collapsed)

mm/page_alloc.c

@@ -44,7 +44,7 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
-#include <linux/memcontrol.h>
+#include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>

 #include <asm/tlbflush.h>
@@ -223,17 +223,12 @@ static inline int bad_range(struct zone *zone, struct page *page)

 static void bad_page(struct page *page)
 {
-	void *pc = page_get_page_cgroup(page);
 	printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
 		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
 		current->comm, page, (int)(2*sizeof(unsigned long)),
 		(unsigned long)page->flags, page->mapping,
 		page_mapcount(page), page_count(page));
-	if (pc) {
-		printk(KERN_EMERG "cgroup:%p\n", pc);
-		page_reset_bad_cgroup(page);
-	}
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 		KERN_EMERG "Backtrace:\n");
 	dump_stack();
@@ -457,7 +452,6 @@ static inline int free_pages_check(struct page *page)
 	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
-		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0) |
 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
 		bad_page(page);
@@ -603,7 +597,6 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
-		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0) |
 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
 		bad_page(page);
@@ -3438,6 +3431,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	pgdat->nr_zones = 0;
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	pgdat->kswapd_max_order = 0;
+	pgdat_page_cgroup_init(pgdat);
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;

mm/page_cgroup.c (new file)

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/memory.h>
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
pc->flags = 0;
pc->mem_cgroup = NULL;
pc->page = pfn_to_page(pfn);
}
static unsigned long total_usage;
#if !defined(CONFIG_SPARSEMEM)
void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
pgdat->node_page_cgroup = NULL;
}
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
unsigned long offset;
struct page_cgroup *base;
base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
if (unlikely(!base))
return NULL;
offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
return base + offset;
}
static int __init alloc_node_page_cgroup(int nid)
{
struct page_cgroup *base, *pc;
unsigned long table_size;
unsigned long start_pfn, nr_pages, index;
start_pfn = NODE_DATA(nid)->node_start_pfn;
nr_pages = NODE_DATA(nid)->node_spanned_pages;
table_size = sizeof(struct page_cgroup) * nr_pages;
base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!base)
return -ENOMEM;
for (index = 0; index < nr_pages; index++) {
pc = base + index;
__init_page_cgroup(pc, start_pfn + index);
}
NODE_DATA(nid)->node_page_cgroup = base;
total_usage += table_size;
return 0;
}
void __init page_cgroup_init(void)
{
int nid, fail;
for_each_online_node(nid) {
fail = alloc_node_page_cgroup(nid);
if (fail)
goto fail;
}
printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
printk(KERN_INFO "please try cgroup_disable=memory option if you"
" don't want\n");
return;
fail:
printk(KERN_CRIT "allocation of page_cgroup was failed.\n");
printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
panic("Out of memory");
}
#else /* CONFIG_SPARSEMEM */
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
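	/*
	 * ->page_cgroup is stored biased by the section's start pfn
	 * (see init_section_page_cgroup() below), so indexing it with
	 * the raw pfn lands on the right entry.
	 */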
return section->page_cgroup + pfn;
}
int __meminit init_section_page_cgroup(unsigned long pfn)
{
struct mem_section *section;
struct page_cgroup *base, *pc;
unsigned long table_size;
int nid, index;
section = __pfn_to_section(pfn);
if (section->page_cgroup)
return 0;
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
base = kmalloc_node(table_size, GFP_KERNEL, nid);
if (!base)
base = vmalloc_node(table_size, nid);
if (!base) {
printk(KERN_ERR "page cgroup allocation failure\n");
return -ENOMEM;
}
for (index = 0; index < PAGES_PER_SECTION; index++) {
pc = base + index;
__init_page_cgroup(pc, pfn + index);
}
section = __pfn_to_section(pfn);
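	/*
	 * Bias the base by the section's start pfn so that
	 * lookup_page_cgroup() can index the table with a raw pfn.
	 */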
section->page_cgroup = base - pfn;
total_usage += table_size;
return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
struct mem_section *ms;
struct page_cgroup *base;
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_cgroup)
return;
base = ms->page_cgroup + pfn;
ms->page_cgroup = NULL;
if (is_vmalloc_addr(base))
vfree(base);
else
kfree(base);
}
int online_page_cgroup(unsigned long start_pfn,
unsigned long nr_pages,
int nid)
{
unsigned long start, end, pfn;
int fail = 0;
	start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
if (!pfn_present(pfn))
continue;
fail = init_section_page_cgroup(pfn);
}
if (!fail)
return 0;
/* rollback */
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__free_page_cgroup(pfn);
return -ENOMEM;
}
int offline_page_cgroup(unsigned long start_pfn,
unsigned long nr_pages, int nid)
{
unsigned long start, end, pfn;
	start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__free_page_cgroup(pfn);
return 0;
}
static int page_cgroup_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
struct memory_notify *mn = arg;
int ret = 0;
switch (action) {
case MEM_GOING_ONLINE:
ret = online_page_cgroup(mn->start_pfn,
mn->nr_pages, mn->status_change_nid);
break;
case MEM_CANCEL_ONLINE:
case MEM_OFFLINE:
offline_page_cgroup(mn->start_pfn,
mn->nr_pages, mn->status_change_nid);
break;
case MEM_GOING_OFFLINE:
break;
case MEM_ONLINE:
case MEM_CANCEL_OFFLINE:
break;
}
ret = notifier_from_errno(ret);
return ret;
}
#endif
void __init page_cgroup_init(void)
{
unsigned long pfn;
int fail = 0;
for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
if (!pfn_present(pfn))
continue;
fail = init_section_page_cgroup(pfn);
}
if (fail) {
printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
panic("Out of memory");
} else {
hotplug_memory_notifier(page_cgroup_callback, 0);
}
printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
" want\n");
}
void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
return;
}
#endif