Commit 6c48a1d0 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

per-zone and reclaim enhancements for memory controller: remember reclaim priority in memory cgroup

Add functions to remember the reclaim priority per memory cgroup, analogous to zone->prev_priority.

[akpm@linux-foundation.org: build fixes]
[akpm@linux-foundation.org: more build fixes]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Menage <menage@google.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5932f367
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -70,6 +70,11 @@ extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
 extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
 extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
+extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
+extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
+						int priority);
+extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
+						int priority);
 
 #else /* CONFIG_CGROUP_MEM_CONT */
@@ -153,6 +158,21 @@ static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
 	return 0;
 }
 
+static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
+{
+	return 0;
+}
+
+static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
+						int priority)
+{
+}
+
+static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
+						int priority)
+{
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -132,6 +132,7 @@ struct mem_cgroup {
 	 */
 	spinlock_t lru_lock;
 	unsigned long control_type;	/* control RSS or RSS+Pagecache */
+	int	prev_priority;	/* for recording reclaim priority */
 	/*
 	 * statistics.
 	 */
@@ -451,6 +452,25 @@ long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
 	return (long) (active / (inactive + 1));
 }
 
+/*
+ * prev_priority control...this will be used in memory reclaim path.
+ */
+int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
+{
+	return mem->prev_priority;
+}
+
+void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+	if (priority < mem->prev_priority)
+		mem->prev_priority = priority;
+}
+
+void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+	mem->prev_priority = priority;
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
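For context, the new helpers' comment says they "will be used in memory reclaim path". The sketch below shows how a priority-based reclaim loop could consult them, following the same pattern as zone->prev_priority handling in mm/vmscan.c: note each priority level while scanning, record the level you stopped at, and read it back on the next pass. This is illustrative only and not part of this commit; the names sketch_cgroup_reclaim, sketch_shrink_cgroup and SKETCH_DEF_PRIORITY are hypothetical stand-ins for the real reclaim code.

/*
 * Illustrative sketch (not from this commit): a reclaim loop notes the
 * priority it is currently scanning at, records where it stopped, and a
 * later pass can read that value back via mem_cgroup_get_reclaim_priority()
 * to tune how aggressively it scans.
 */
#define SKETCH_DEF_PRIORITY	12	/* hypothetical, plays the role of DEF_PRIORITY */

static unsigned long sketch_cgroup_reclaim(struct mem_cgroup *mem)
{
	unsigned long nr_reclaimed = 0;
	int priority;

	for (priority = SKETCH_DEF_PRIORITY; priority >= 0; priority--) {
		/* Remember the lowest (most aggressive) priority reached so far. */
		mem_cgroup_note_reclaim_priority(mem, priority);

		/* sketch_shrink_cgroup() stands in for the real page scanning. */
		nr_reclaimed += sketch_shrink_cgroup(mem, priority);
		if (nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}

	/*
	 * Remember where we ended up so the next reclaim pass can take it
	 * into account (clamped in case the loop ran to exhaustion).
	 */
	mem_cgroup_record_reclaim_priority(mem, priority < 0 ? 0 : priority);

	return nr_reclaimed;
}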