Commit 925b7673 authored by Johannes Weiner, committed by Linus Torvalds

mm: make per-memcg LRU lists exclusive

Now that all code that operated on the global per-zone LRU lists has been
converted to operate on per-memory cgroup LRU lists instead, there is no
reason to keep the double-LRU scheme around any longer.

The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from a
descriptor that exists for every page frame in the system.
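
To make the change concrete, here is a rough sketch of the two linkage
schemes, modelled on the add_page_to_lru_list() hunk further down. It is
illustrative only, not verbatim kernel source: the wrapper names
double_lru_add() and exclusive_lru_add() are invented for this sketch, and
locking plus vmstat accounting are omitted.

    /* Before: double LRU - the page sits on the global per-zone list via
     * page->lru, and the memcg mirrors it on a private list via
     * page_cgroup->lru. */
    static inline void double_lru_add(struct zone *zone, struct page *page,
                                      enum lru_list lru)
    {
            list_add(&page->lru, &zone->lruvec.lists[lru]);
            mem_cgroup_add_lru_list(page, lru);     /* links pc->lru too */
    }

    /* After: exclusive LRU - the memcg lookup returns the one lruvec the
     * page belongs to (plain &zone->lruvec when memcg is disabled), and
     * page->lru is the only list linkage. */
    static inline void exclusive_lru_add(struct zone *zone, struct page *page,
                                         enum lru_list lru)
    {
            struct lruvec *lruvec;

            lruvec = mem_cgroup_lru_add_list(zone, page, lru);
            list_add(&page->lru, &lruvec->lists[lru]);
    }
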
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6290df54
@@ -32,14 +32,6 @@ enum mem_cgroup_page_stat_item {
MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
struct list_head *dst,
unsigned long *scanned, int order,
isolate_mode_t mode,
struct zone *z,
struct mem_cgroup *mem_cont,
int active, int file);
struct mem_cgroup_reclaim_cookie {
struct zone *zone;
int priority;
@@ -69,13 +61,14 @@ extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
enum lru_list from, enum lru_list to);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
enum lru_list, enum lru_list);
/* For coalescing uncharge for reducing memcg' overhead*/
extern void mem_cgroup_uncharge_start(void);
@@ -223,33 +216,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}
static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}
static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
struct mem_cgroup *memcg)
{
return ;
return &zone->lruvec;
}
static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
struct page *page,
enum lru_list lru)
{
return ;
return &zone->lruvec;
}
static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
return ;
}
static inline void mem_cgroup_del_lru(struct page *page)
static inline void mem_cgroup_lru_del(struct page *page)
{
return ;
}
static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
struct page *page,
enum lru_list from,
enum lru_list to)
{
return &zone->lruvec;
}
static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -21,27 +21,22 @@ static inline int page_is_file_cache(struct page *page)
return !PageSwapBacked(page);
}
static inline void
__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
struct list_head *head)
{
list_add(&page->lru, head);
__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
mem_cgroup_add_lru_list(page, l);
}
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
struct lruvec *lruvec;
lruvec = mem_cgroup_lru_add_list(zone, page, l);
list_add(&page->lru, &lruvec->lists[l]);
__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
}
static inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
mem_cgroup_lru_del_list(page, l);
list_del(&page->lru);
__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
mem_cgroup_del_lru_list(page, l);
}
/**
@@ -64,7 +59,6 @@ del_page_from_lru(struct zone *zone, struct page *page)
{
enum lru_list l;
list_del(&page->lru);
if (PageUnevictable(page)) {
__ClearPageUnevictable(page);
l = LRU_UNEVICTABLE;
@@ -75,8 +69,9 @@ del_page_from_lru(struct zone *zone, struct page *page)
l += LRU_ACTIVE;
}
}
mem_cgroup_lru_del_list(page, l);
list_del(&page->lru);
__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
mem_cgroup_del_lru_list(page, l);
}
/**
@@ -31,7 +31,6 @@ enum {
struct page_cgroup {
unsigned long flags;
struct mem_cgroup *mem_cgroup;
struct list_head lru; /* per cgroup LRU list */
};
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
This diff is collapsed.
@@ -16,7 +16,6 @@ static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
pc->flags = 0;
set_page_cgroup_array_id(pc, id);
pc->mem_cgroup = NULL;
INIT_LIST_HEAD(&pc->lru);
}
static unsigned long total_usage;
@@ -232,12 +232,14 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
static void pagevec_move_tail_fn(struct page *page, void *arg)
{
int *pgmoved = arg;
struct zone *zone = page_zone(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
mem_cgroup_rotate_reclaimable_page(page);
struct lruvec *lruvec;
lruvec = mem_cgroup_lru_move_lists(page_zone(page),
page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
(*pgmoved)++;
}
}
@@ -476,12 +478,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
*/
SetPageReclaim(page);
} else {
struct lruvec *lruvec;
/*
* The page's writeback ended while it was on the pagevec.
* We move the page to the tail of the inactive list.
*/
list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
mem_cgroup_rotate_reclaimable_page(page);
lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
__count_vm_event(PGROTATED);
}
@@ -663,6 +666,8 @@ void lru_add_page_tail(struct zone* zone,
SetPageLRU(page_tail);
if (page_evictable(page_tail, NULL)) {
struct lruvec *lruvec;
if (PageActive(page)) {
SetPageActive(page_tail);
active = 1;
@@ -672,11 +677,13 @@ void lru_add_page_tail(struct zone* zone,
lru = LRU_INACTIVE_ANON;
}
update_page_reclaim_stat(zone, page_tail, file, active);
lruvec = mem_cgroup_lru_add_list(zone, page_tail, lru);
if (likely(PageLRU(page)))
__add_page_to_lru_list(zone, page_tail, lru,
page->lru.prev);
list_add(&page_tail->lru, page->lru.prev);
else
add_page_to_lru_list(zone, page_tail, lru);
list_add(&page_tail->lru, &lruvec->lists[lru]);
__mod_zone_page_state(zone, NR_LRU_BASE + lru,
hpage_nr_pages(page_tail));
} else {
SetPageUnevictable(page_tail);
add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
@@ -1139,15 +1139,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
switch (__isolate_lru_page(page, mode, file)) {
case 0:
mem_cgroup_lru_del(page);
list_move(&page->lru, dst);
mem_cgroup_del_lru(page);
nr_taken += hpage_nr_pages(page);
break;
case -EBUSY:
/* else it is being freed elsewhere */
list_move(&page->lru, src);
mem_cgroup_rotate_lru_list(page, page_lru(page));
continue;
default:
@@ -1197,8 +1196,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
break;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
mem_cgroup_lru_del(cursor_page);
list_move(&cursor_page->lru, dst);
mem_cgroup_del_lru(cursor_page);
nr_taken += hpage_nr_pages(cursor_page);
nr_lumpy_taken++;
if (PageDirty(cursor_page))
@@ -1239,18 +1238,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
return nr_taken;
}
static unsigned long isolate_pages_global(unsigned long nr,
static unsigned long isolate_pages(unsigned long nr, struct mem_cgroup_zone *mz,
struct list_head *dst,
unsigned long *scanned, int order,
isolate_mode_t mode,
struct zone *z, int active, int file)
isolate_mode_t mode, int active, int file)
{
struct lruvec *lruvec;
int lru = LRU_BASE;
lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
if (active)
lru += LRU_ACTIVE;
if (file)
lru += LRU_FILE;
return isolate_lru_pages(nr, &z->lruvec.lists[lru], dst,
return isolate_lru_pages(nr, &lruvec->lists[lru], dst,
scanned, order, mode, file);
}
@@ -1518,14 +1519,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
spin_lock_irq(&zone->lru_lock);
if (scanning_global_lru(mz)) {
nr_taken = isolate_pages_global(nr_to_scan, &page_list,
&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
} else {
nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
&nr_scanned, sc->order, reclaim_mode, zone,
mz->mem_cgroup, 0, file);
}
nr_taken = isolate_pages(nr_to_scan, mz, &page_list,
&nr_scanned, sc->order,
reclaim_mode, 0, file);
if (global_reclaim(sc)) {
zone->pages_scanned += nr_scanned;
if (current_is_kswapd())
@@ -1625,13 +1621,15 @@ static void move_active_pages_to_lru(struct zone *zone,
pagevec_init(&pvec, 1);
while (!list_empty(list)) {
struct lruvec *lruvec;
page = lru_to_page(list);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
list_move(&page->lru, &zone->lruvec.lists[lru]);
mem_cgroup_add_lru_list(page, lru);
lruvec = mem_cgroup_lru_add_list(zone, page, lru);
list_move(&page->lru, &lruvec->lists[lru]);
pgmoved += hpage_nr_pages(page);
if (!pagevec_add(&pvec, page) || list_empty(list)) {
@@ -1672,17 +1670,10 @@ static void shrink_active_list(unsigned long nr_pages,
reclaim_mode |= ISOLATE_CLEAN;
spin_lock_irq(&zone->lru_lock);
if (scanning_global_lru(mz)) {
nr_taken = isolate_pages_global(nr_pages, &l_hold,
&pgscanned, sc->order,
reclaim_mode, zone,
1, file);
} else {
nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
nr_taken = isolate_pages(nr_pages, mz, &l_hold,
&pgscanned, sc->order,
reclaim_mode, zone,
mz->mem_cgroup, 1, file);
}
reclaim_mode, 1, file);
if (global_reclaim(sc))
zone->pages_scanned += pgscanned;
@@ -3440,16 +3431,18 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
*/
static void check_move_unevictable_page(struct page *page, struct zone *zone)
{
VM_BUG_ON(PageActive(page));
struct lruvec *lruvec;
VM_BUG_ON(PageActive(page));
retry:
ClearPageUnevictable(page);
if (page_evictable(page, NULL)) {
enum lru_list l = page_lru_base_type(page);
__dec_zone_state(zone, NR_UNEVICTABLE);
list_move(&page->lru, &zone->lruvec.lists[l]);
mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
lruvec = mem_cgroup_lru_move_lists(zone, page,
LRU_UNEVICTABLE, l);
list_move(&page->lru, &lruvec->lists[l]);
__inc_zone_state(zone, NR_INACTIVE_ANON + l);
__count_vm_event(UNEVICTABLE_PGRESCUED);
} else {
@@ -3457,8 +3450,9 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
* rotate unevictable list
*/
SetPageUnevictable(page);
list_move(&page->lru, &zone->lruvec.lists[LRU_UNEVICTABLE]);
mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
LRU_UNEVICTABLE);
list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
if (page_evictable(page, NULL))
goto retry;
}