Commit 925b7673 authored by Johannes Weiner, committed by Linus Torvalds

mm: make per-memcg LRU lists exclusive

Now that all code that operated on the global per-zone LRU lists has been
converted to operate on per-memory cgroup LRU lists instead, there is no
reason to keep the double-LRU scheme around any longer.

The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from a
descriptor that exists for every page frame in the system.
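
With exclusive lists, putting a page on an LRU boils down to looking up the
lruvec for its zone and memcg and linking page->lru into it, roughly as the
updated mm_inline.h helper below does (shown here for illustration; see the
hunks further down for the actual change):

	static inline void
	add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
	{
		struct lruvec *lruvec;

		/* account the page to its memcg and get the matching list vector */
		lruvec = mem_cgroup_lru_add_list(zone, page, l);
		/* the page itself is linked via page->lru, not page_cgroup */
		list_add(&page->lru, &lruvec->lists[l]);
		__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
	}

Removal is symmetric: mem_cgroup_lru_del_list() un-accounts the page before
the list_del() of page->lru.
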
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6290df54
@@ -32,14 +32,6 @@ enum mem_cgroup_page_stat_item {
 	MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
 };
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
-
 struct mem_cgroup_reclaim_cookie {
 	struct zone *zone;
 	int priority;
@@ -69,13 +61,14 @@ extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 
-extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
-extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page,
-				  enum lru_list from, enum lru_list to);
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
+struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
+				       enum lru_list);
+void mem_cgroup_lru_del_list(struct page *, enum lru_list);
+void mem_cgroup_lru_del(struct page *);
+struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
+					 enum lru_list, enum lru_list);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -223,33 +216,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
+static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+						    struct mem_cgroup *memcg)
 {
+	return &zone->lruvec;
 }
 
-static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
+						     struct page *page,
+						     enum lru_list lru)
 {
-	return ;
+	return &zone->lruvec;
 }
 
-static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 {
-	return ;
 }
 
-static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+static inline void mem_cgroup_lru_del(struct page *page)
 {
-	return ;
 }
 
-static inline void mem_cgroup_del_lru(struct page *page)
-{
-	return ;
-}
-
-static inline void
-mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
+static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
+						       struct page *page,
+						       enum lru_list from,
+						       enum lru_list to)
 {
+	return &zone->lruvec;
 }
 
 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
...
@@ -21,27 +21,22 @@ static inline int page_is_file_cache(struct page *page)
 	return !PageSwapBacked(page);
 }
 
-static inline void
-__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
-		       struct list_head *head)
-{
-	list_add(&page->lru, head);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
-	mem_cgroup_add_lru_list(page, l);
-}
-
 static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_lru_add_list(zone, page, l);
+	list_add(&page->lru, &lruvec->lists[l]);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
 }
 
 static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
+	mem_cgroup_lru_del_list(page, l);
 	list_del(&page->lru);
 	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
 }
 
 /**
@@ -64,7 +59,6 @@ del_page_from_lru(struct zone *zone, struct page *page)
 {
 	enum lru_list l;
 
-	list_del(&page->lru);
 	if (PageUnevictable(page)) {
 		__ClearPageUnevictable(page);
 		l = LRU_UNEVICTABLE;
@@ -75,8 +69,9 @@ del_page_from_lru(struct zone *zone, struct page *page)
 			l += LRU_ACTIVE;
 		}
 	}
+	mem_cgroup_lru_del_list(page, l);
+	list_del(&page->lru);
 	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
 }
 
 /**
...
@@ -31,7 +31,6 @@ enum {
 struct page_cgroup {
 	unsigned long flags;
 	struct mem_cgroup *mem_cgroup;
-	struct list_head lru;		/* per cgroup LRU list */
 };
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
...
@@ -995,6 +995,27 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 }
 EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 
+/**
+ * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
+ * @zone: zone of the wanted lruvec
+ * @mem: memcg of the wanted lruvec
+ *
+ * Returns the lru list vector holding pages for the given @zone and
+ * @mem.  This can be the global zone lruvec, if the memory controller
+ * is disabled.
+ */
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+				      struct mem_cgroup *memcg)
+{
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return &zone->lruvec;
+
+	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
+	return &mz->lruvec;
+}
+
 /*
  * Following LRU functions are allowed to be used without PCG_LOCK.
  * Operations are called by routine of global LRU independently from memcg.
@@ -1009,104 +1030,123 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
  * When moving account, the page is not on LRU. It's isolated.
  */
-void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return;
-	pc = lookup_page_cgroup(page);
-	/* can happen while we handle swapcache. */
-	if (!TestClearPageCgroupAcctLRU(pc))
-		return;
-	VM_BUG_ON(!pc->mem_cgroup);
-	/*
-	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
-	 * removed from global LRU.
-	 */
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
-	VM_BUG_ON(list_empty(&pc->lru));
-	list_del_init(&pc->lru);
-}
-
-void mem_cgroup_del_lru(struct page *page)
-{
-	mem_cgroup_del_lru_list(page, page_lru(page));
-}
-
-/*
- * Writeback is about to end against a page which has been marked for immediate
- * reclaim.  If it still appears to be reclaimable, move it to the tail of the
- * inactive list.
- */
-void mem_cgroup_rotate_reclaimable_page(struct page *page)
-{
-	struct mem_cgroup_per_zone *mz;
-	struct page_cgroup *pc;
-	enum lru_list lru = page_lru(page);
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(page);
-	/* unused page is not rotated. */
-	if (!PageCgroupUsed(pc))
-		return;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
-}
-
-void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
-{
-	struct mem_cgroup_per_zone *mz;
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(page);
-	/* unused page is not rotated. */
-	if (!PageCgroupUsed(pc))
-		return;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lruvec.lists[lru]);
-}
-
-void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return;
-	pc = lookup_page_cgroup(page);
-	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * putback:				charge:
-	 *   SetPageLRU			SetPageCgroupUsed
-	 *   smp_mb				smp_mb
-	 *   PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
-	 *
-	 * Ensure that one of the two sides adds the page to the memcg
-	 * LRU during a race.
-	 */
-	smp_mb();
-	if (!PageCgroupUsed(pc))
-		return;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
-	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lruvec.lists[lru]);
-}
+/**
+ * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
+ * @zone: zone of the page
+ * @page: the page
+ * @lru: current lru
+ *
+ * This function accounts for @page being added to @lru, and returns
+ * the lruvec for the given @zone and the memcg @page is charged to.
+ *
+ * The callsite is then responsible for physically linking the page to
+ * the returned lruvec->lists[@lru].
+ */
+struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
+				       enum lru_list lru)
+{
+	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+
+	if (mem_cgroup_disabled())
+		return &zone->lruvec;
+
+	pc = lookup_page_cgroup(page);
+	VM_BUG_ON(PageCgroupAcctLRU(pc));
+	/*
+	 * putback:				charge:
+	 *   SetPageLRU			SetPageCgroupUsed
+	 *   smp_mb				smp_mb
+	 *   PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
+	 *
+	 * Ensure that one of the two sides adds the page to the memcg
+	 * LRU during a race.
+	 */
+	smp_mb();
+	/*
+	 * If the page is uncharged, it may be freed soon, but it
+	 * could also be swap cache (readahead, swapoff) that needs to
+	 * be reclaimable in the future.  root_mem_cgroup will babysit
+	 * it for the time being.
+	 */
+	if (PageCgroupUsed(pc)) {
+		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+		smp_rmb();
+		memcg = pc->mem_cgroup;
+		SetPageCgroupAcctLRU(pc);
+	} else
+		memcg = root_mem_cgroup;
+	mz = page_cgroup_zoneinfo(memcg, page);
+	/* compound_order() is stabilized through lru_lock */
+	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
+	return &mz->lruvec;
+}
+
+/**
+ * mem_cgroup_lru_del_list - account for removing an lru page
+ * @page: the page
+ * @lru: target lru
+ *
+ * This function accounts for @page being removed from @lru.
+ *
+ * The callsite is then responsible for physically unlinking
+ * @page->lru.
+ */
+void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+{
+	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+
+	if (mem_cgroup_disabled())
+		return;
+
+	pc = lookup_page_cgroup(page);
+	/*
+	 * root_mem_cgroup babysits uncharged LRU pages, but
+	 * PageCgroupUsed is cleared when the page is about to get
+	 * freed.  PageCgroupAcctLRU remembers whether the
+	 * LRU-accounting happened against pc->mem_cgroup or
+	 * root_mem_cgroup.
+	 */
+	if (TestClearPageCgroupAcctLRU(pc)) {
+		VM_BUG_ON(!pc->mem_cgroup);
+		memcg = pc->mem_cgroup;
+	} else
+		memcg = root_mem_cgroup;
+	mz = page_cgroup_zoneinfo(memcg, page);
+	/* huge page split is done under lru_lock. so, we have no races. */
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
+}
+
+void mem_cgroup_lru_del(struct page *page)
+{
+	mem_cgroup_lru_del_list(page, page_lru(page));
+}
+
+/**
+ * mem_cgroup_lru_move_lists - account for moving a page between lrus
+ * @zone: zone of the page
+ * @page: the page
+ * @from: current lru
+ * @to: target lru
+ *
+ * This function accounts for @page being moved between the lrus @from
+ * and @to, and returns the lruvec for the given @zone and the memcg
+ * @page is charged to.
+ *
+ * The callsite is then responsible for physically relinking
+ * @page->lru to the returned lruvec->lists[@to].
+ */
+struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
+					 struct page *page,
+					 enum lru_list from,
+					 enum lru_list to)
+{
+	/* XXX: Optimize this, especially for @from == @to */
+	mem_cgroup_lru_del_list(page, from);
+	return mem_cgroup_lru_add_list(zone, page, to);
+}
 
 /*
@@ -1117,6 +1157,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
  */
 static void mem_cgroup_lru_del_before_commit(struct page *page)
 {
+	enum lru_list lru;
 	unsigned long flags;
 	struct zone *zone = page_zone(page);
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1133,17 +1174,28 @@ static void mem_cgroup_lru_del_before_commit(struct page *page)
 		return;
 	spin_lock_irqsave(&zone->lru_lock, flags);
+	lru = page_lru(page);
 	/*
-	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
-	 * is guarded by lock_page() because the page is SwapCache.
+	 * The uncharged page could still be registered to the LRU of
+	 * the stale pc->mem_cgroup.
+	 *
+	 * As pc->mem_cgroup is about to get overwritten, the old LRU
+	 * accounting needs to be taken care of.  Let root_mem_cgroup
+	 * babysit the page until the new memcg is responsible for it.
+	 *
+	 * The PCG_USED bit is guarded by lock_page() as the page is
+	 * swapcache/pagecache.
 	 */
-	if (!PageCgroupUsed(pc))
-		mem_cgroup_del_lru_list(page, page_lru(page));
+	if (PageLRU(page) && PageCgroupAcctLRU(pc) && !PageCgroupUsed(pc)) {
+		del_page_from_lru_list(zone, page, lru);
+		add_page_to_lru_list(zone, page, lru);
+	}
 	spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
 static void mem_cgroup_lru_add_after_commit(struct page *page)
 {
+	enum lru_list lru;
 	unsigned long flags;
 	struct zone *zone = page_zone(page);
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1161,22 +1213,22 @@ static void mem_cgroup_lru_add_after_commit(struct page *page)
 	if (likely(!PageLRU(page)))
 		return;
 	spin_lock_irqsave(&zone->lru_lock, flags);
-	/* link when the page is linked to LRU but page_cgroup isn't */
-	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
-		mem_cgroup_add_lru_list(page, page_lru(page));
+	lru = page_lru(page);
+	/*
+	 * If the page is not on the LRU, someone will soon put it
+	 * there.  If it is, and also already accounted for on the
+	 * memcg-side, it must be on the right lruvec as setting
+	 * pc->mem_cgroup and PageCgroupUsed is properly ordered.
+	 * Otherwise, root_mem_cgroup has been babysitting the page
+	 * during the charge.  Move it to the new memcg now.
+	 */
+	if (PageLRU(page) && !PageCgroupAcctLRU(pc)) {
+		del_page_from_lru_list(zone, page, lru);
+		add_page_to_lru_list(zone, page, lru);
+	}
 	spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
-void mem_cgroup_move_lists(struct page *page,
-			   enum lru_list from, enum lru_list to)
-{
-	if (mem_cgroup_disabled())
-		return;
-	mem_cgroup_del_lru_list(page, from);
-	mem_cgroup_add_lru_list(page, to);
-}
-
 /*
  * Checks whether given mem is same or in the root_mem_cgroup's
  * hierarchy subtree
@@ -1282,68 +1334,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 	return &mz->reclaim_stat;
 }
 
-unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file)
-{
-	unsigned long nr_taken = 0;
-	struct page *page;
-	unsigned long scan;
-	LIST_HEAD(pc_list);
-	struct list_head *src;
-	struct page_cgroup *pc, *tmp;
-	int nid = zone_to_nid(z);
-	int zid = zone_idx(z);
-	struct mem_cgroup_per_zone *mz;
-	int lru = LRU_FILE * file + active;
-	int ret;
-
-	BUG_ON(!mem_cont);
-	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lruvec.lists[lru];
-
-	scan = 0;
-	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
-		if (scan >= nr_to_scan)
-			break;
-
-		if (unlikely(!PageCgroupUsed(pc)))
-			continue;
-
-		page = lookup_cgroup_page(pc);
-
-		if (unlikely(!PageLRU(page)))
-			continue;
-
-		scan++;
-		ret = __isolate_lru_page(page, mode, file);
-		switch (ret) {
-		case 0:
-			list_move(&page->lru, dst);
-			mem_cgroup_del_lru(page);
-			nr_taken += hpage_nr_pages(page);
-			break;
-		case -EBUSY:
-			/* we don't affect global LRU but rotate in our LRU */
-			mem_cgroup_rotate_lru_list(page, page_lru(page));
-			break;
-		default:
-			break;
-		}
-	}
-
-	*scanned = scan;
-
-	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
-				      0, 0, 0, mode);
-
-	return nr_taken;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
@@ -3726,11 +3716,11 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 				int node, int zid, enum lru_list lru)
 {
-	struct zone *zone;
 	struct mem_cgroup_per_zone *mz;
-	struct page_cgroup *pc, *busy;
 	unsigned long flags, loop;
 	struct list_head *list;
+	struct page *busy;
+	struct zone *zone;
 	int ret = 0;
 
 	zone = &NODE_DATA(node)->node_zones[zid];
@@ -3742,6 +3732,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 	loop += 256;
 	busy = NULL;
 	while (loop--) {
+		struct page_cgroup *pc;
 		struct page *page;
 
 		ret = 0;
@@ -3750,16 +3741,16 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			break;
 		}
-		pc = list_entry(list->prev, struct page_cgroup, lru);
-		if (busy == pc) {
-			list_move(&pc->lru, list);
+		page = list_entry(list->prev, struct page, lru);
+		if (busy == page) {
+			list_move(&page->lru, list);
 			busy = NULL;
 			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			continue;
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-		page = lookup_cgroup_page(pc);
+		pc = lookup_page_cgroup(page);
 
 		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
 		if (ret == -ENOMEM)
@@ -3767,7 +3758,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 
 		if (ret == -EBUSY || ret == -EINVAL) {
 			/* found lock contention or "pc" is obsolete. */
-			busy = pc;
+			busy = page;
 			cond_resched();
 		} else
 			busy = NULL;
...
@@ -16,7 +16,6 @@ static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
 	pc->flags = 0;
 	set_page_cgroup_array_id(pc, id);
 	pc->mem_cgroup = NULL;
-	INIT_LIST_HEAD(&pc->lru);
 }
 
 static unsigned long total_usage;
...
@@ -232,12 +232,14 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 static void pagevec_move_tail_fn(struct page *page, void *arg)
 {
 	int *pgmoved = arg;
-	struct zone *zone = page_zone(page);
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_rotate_reclaimable_page(page);
+		struct lruvec *lruvec;
+
+		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
+						   page, lru, lru);
+		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		(*pgmoved)++;
 	}
 }
@@ -476,12 +478,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 */
 		SetPageReclaim(page);
 	} else {
+		struct lruvec *lruvec;
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_rotate_reclaimable_page(page);
+		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
+		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		__count_vm_event(PGROTATED);
 	}
 
@@ -663,6 +666,8 @@ void lru_add_page_tail(struct zone* zone,
 	SetPageLRU(page_tail);
 
 	if (page_evictable(page_tail, NULL)) {
+		struct lruvec *lruvec;
+
 		if (PageActive(page)) {
 			SetPageActive(page_tail);
 			active = 1;
@@ -672,11 +677,13 @@ void lru_add_page_tail(struct zone* zone,
 			lru = LRU_INACTIVE_ANON;
 		}
 		update_page_reclaim_stat(zone, page_tail, file, active);
+		lruvec = mem_cgroup_lru_add_list(zone, page_tail, lru);
 		if (likely(PageLRU(page)))
-			__add_page_to_lru_list(zone, page_tail, lru,
-					       page->lru.prev);
+			list_add(&page_tail->lru, page->lru.prev);
 		else
-			add_page_to_lru_list(zone, page_tail, lru);
+			list_add(&page_tail->lru, &lruvec->lists[lru]);
+		__mod_zone_page_state(zone, NR_LRU_BASE + lru,
+				      hpage_nr_pages(page_tail));
 	} else {
 		SetPageUnevictable(page_tail);
 		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
...
@@ -1139,15 +1139,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 		switch (__isolate_lru_page(page, mode, file)) {
 		case 0:
+			mem_cgroup_lru_del(page);
 			list_move(&page->lru, dst);
-			mem_cgroup_del_lru(page);
 			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
 			/* else it is being freed elsewhere */
 			list_move(&page->lru, src);
-			mem_cgroup_rotate_lru_list(page, page_lru(page));
 			continue;
 
 		default:
@@ -1197,8 +1196,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				break;
 
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
+				mem_cgroup_lru_del(cursor_page);
 				list_move(&cursor_page->lru, dst);
-				mem_cgroup_del_lru(cursor_page);
 				nr_taken += hpage_nr_pages(cursor_page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
@@ -1239,18 +1238,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	return nr_taken;
 }
 
-static unsigned long isolate_pages_global(unsigned long nr,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z, int active, int file)
+static unsigned long isolate_pages(unsigned long nr, struct mem_cgroup_zone *mz,
+				   struct list_head *dst,
+				   unsigned long *scanned, int order,
+				   isolate_mode_t mode, int active, int file)
 {
+	struct lruvec *lruvec;
 	int lru = LRU_BASE;
+
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	if (active)
 		lru += LRU_ACTIVE;
 	if (file)
 		lru += LRU_FILE;
-	return isolate_lru_pages(nr, &z->lruvec.lists[lru], dst,
+	return isolate_lru_pages(nr, &lruvec->lists[lru], dst,
 					scanned, order, mode, file);
 }
 
@@ -1518,14 +1519,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	spin_lock_irq(&zone->lru_lock);
 
-	if (scanning_global_lru(mz)) {
-		nr_taken = isolate_pages_global(nr_to_scan, &page_list,
-			&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
-	} else {
-		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
-			&nr_scanned, sc->order, reclaim_mode, zone,
-			mz->mem_cgroup, 0, file);
-	}
+	nr_taken = isolate_pages(nr_to_scan, mz, &page_list,
+				 &nr_scanned, sc->order,
+				 reclaim_mode, 0, file);
 
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1625,13 +1621,15 @@ static void move_active_pages_to_lru(struct zone *zone,
 	pagevec_init(&pvec, 1);
 
 	while (!list_empty(list)) {
+		struct lruvec *lruvec;
+
 		page = lru_to_page(list);
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		list_move(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_add_lru_list(page, lru);
+		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+		list_move(&page->lru, &lruvec->lists[lru]);
 		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
@@ -1672,17 +1670,10 @@ static void shrink_active_list(unsigned long nr_pages,
 		reclaim_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&zone->lru_lock);
-	if (scanning_global_lru(mz)) {
-		nr_taken = isolate_pages_global(nr_pages, &l_hold,
-						&pgscanned, sc->order,
-						reclaim_mode, zone,
-						1, file);
-	} else {
-		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
-						&pgscanned, sc->order,
-						reclaim_mode, zone,
-						mz->mem_cgroup, 1, file);
-	}
+	nr_taken = isolate_pages(nr_pages, mz, &l_hold,
+				 &pgscanned, sc->order,
+				 reclaim_mode, 1, file);
 
 	if (global_reclaim(sc))
 		zone->pages_scanned += pgscanned;
@@ -3440,16 +3431,18 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
  */
 static void check_move_unevictable_page(struct page *page, struct zone *zone)
 {
-	VM_BUG_ON(PageActive(page));
+	struct lruvec *lruvec;
 
+	VM_BUG_ON(PageActive(page));
 retry:
 	ClearPageUnevictable(page);
 	if (page_evictable(page, NULL)) {
 		enum lru_list l = page_lru_base_type(page);
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
-		list_move(&page->lru, &zone->lruvec.lists[l]);
-		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
+		lruvec = mem_cgroup_lru_move_lists(zone, page,
+						   LRU_UNEVICTABLE, l);
+		list_move(&page->lru, &lruvec->lists[l]);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
 	} else {
@@ -3457,8 +3450,9 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
 		 * rotate unevictable list
 		 */
 		SetPageUnevictable(page);
-		list_move(&page->lru, &zone->lruvec.lists[LRU_UNEVICTABLE]);
-		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
+		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
+						   LRU_UNEVICTABLE);
+		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
 		if (page_evictable(page, NULL))
 			goto retry;
 	}
...