Commit 6290df54 authored by Johannes Weiner, committed by Linus Torvalds

mm: collect LRU list heads into struct lruvec

Having a unified structure with an LRU list set for both global zones and
per-memcg zones allows the code that deals with LRU lists, and does not
care about the container itself, to be kept simple.

Once the per-memcg LRU lists directly link struct pages, the isolation
function and all other list manipulations are shared between the memcg
case and the global LRU case.
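
For illustration only, a minimal userspace sketch of that idea (the
simplified struct list_head and the helper lruvec_init() below are
stand-ins invented for this example; the patch itself open-codes the
initialization loops in free_area_init_core() and
alloc_mem_cgroup_per_zone_info()):

#include <stdio.h>

#define NR_LRU_LISTS 5

/* simplified stand-in for the kernel's struct list_head */
struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head->next = head;
}

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};

/* both containers embed the same lruvec... */
struct zone {
	struct lruvec lruvec;	/* was: struct zone_lru lru[NR_LRU_LISTS] */
};

struct mem_cgroup_per_zone {
	struct lruvec lruvec;	/* was: struct list_head lists[NR_LRU_LISTS] */
};

/* ...so generic code takes a lruvec and never sees the container */
static void lruvec_init(struct lruvec *lruvec)
{
	int l;

	for (l = 0; l < NR_LRU_LISTS; l++)
		INIT_LIST_HEAD(&lruvec->lists[l]);
}

int main(void)
{
	struct zone zone;
	struct mem_cgroup_per_zone mz;

	lruvec_init(&zone.lruvec);	/* same helper for both containers */
	lruvec_init(&mz.lruvec);
	printf("%d LRU lists initialized per lruvec\n", NR_LRU_LISTS);
	return 0;
}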
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b95a2f2d
@@ -33,7 +33,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
 static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
 }
 
 static inline void
@@ -159,6 +159,10 @@ static inline int is_unevictable_lru(enum lru_list l)
 	return (l == LRU_UNEVICTABLE);
 }
 
+struct lruvec {
+	struct list_head lists[NR_LRU_LISTS];
+};
+
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -365,9 +369,7 @@ struct zone {
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct zone_lru {
-		struct list_head list;
-	} lru[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 
 	struct zone_reclaim_stat reclaim_stat;
@@ -134,10 +134,7 @@ struct mem_cgroup_reclaim_iter {
  * per-zone information in memory controller.
  */
 struct mem_cgroup_per_zone {
-	/*
-	 * spin_lock to protect the per cgroup LRU
-	 */
-	struct list_head	lists[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 	unsigned long		count[NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
@@ -1061,7 +1058,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lists[lru]);
+	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
@@ -1079,7 +1076,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lists[lru]);
+	list_move(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
@@ -1109,7 +1106,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lists[lru]);
+	list_add(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 /*
@@ -1307,7 +1304,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lists[lru];
+	src = &mz->lruvec.lists[lru];
 
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -3738,7 +3735,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 	zone = &NODE_DATA(node)->node_zones[zid];
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lists[lru];
+	list = &mz->lruvec.lists[lru];
 
 	loop = MEM_CGROUP_ZSTAT(mz, lru);
 	/* give some margin against EBUSY etc...*/
@@ -4864,7 +4861,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
-			INIT_LIST_HEAD(&mz->lists[l]);
+			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->mem = memcg;
@@ -4288,7 +4288,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone_pcp_init(zone);
 		for_each_lru(l)
-			INIT_LIST_HEAD(&zone->lru[l].list);
+			INIT_LIST_HEAD(&zone->lruvec.lists[l]);
 		zone->reclaim_stat.recent_rotated[0] = 0;
 		zone->reclaim_stat.recent_rotated[1] = 0;
 		zone->reclaim_stat.recent_scanned[0] = 0;
@@ -236,7 +236,7 @@ static void pagevec_move_tail_fn(struct page *page, void *arg)
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lru[lru].list);
+		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_rotate_reclaimable_page(page);
 		(*pgmoved)++;
 	}
@@ -480,7 +480,7 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		list_move_tail(&page->lru, &zone->lru[lru].list);
+		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_rotate_reclaimable_page(page);
 		__count_vm_event(PGROTATED);
 	}
@@ -654,7 +654,6 @@ void lru_add_page_tail(struct zone* zone,
 	int active;
 	enum lru_list lru;
 	const int file = 0;
-	struct list_head *head;
 
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
@@ -674,10 +673,10 @@ void lru_add_page_tail(struct zone* zone,
 		}
 		update_page_reclaim_stat(zone, page_tail, file, active);
 		if (likely(PageLRU(page)))
-			head = page->lru.prev;
+			__add_page_to_lru_list(zone, page_tail, lru,
+					       page->lru.prev);
 		else
-			head = &zone->lru[lru].list;
-		__add_page_to_lru_list(zone, page_tail, lru, head);
+			add_page_to_lru_list(zone, page_tail, lru);
 	} else {
 		SetPageUnevictable(page_tail);
 		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
@@ -1250,8 +1250,8 @@ static unsigned long isolate_pages_global(unsigned long nr,
 		lru += LRU_ACTIVE;
 	if (file)
 		lru += LRU_FILE;
-	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-					mode, file);
+	return isolate_lru_pages(nr, &z->lruvec.lists[lru], dst,
+				 scanned, order, mode, file);
 }
 
 /*
@@ -1630,7 +1630,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		list_move(&page->lru, &zone->lru[lru].list);
+		list_move(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved += hpage_nr_pages(page);
@@ -3448,7 +3448,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
 		enum lru_list l = page_lru_base_type(page);
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
-		list_move(&page->lru, &zone->lru[l].list);
+		list_move(&page->lru, &zone->lruvec.lists[l]);
 		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
@@ -3457,7 +3457,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
 		 * rotate unevictable list
 		 */
 		SetPageUnevictable(page);
-		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		list_move(&page->lru, &zone->lruvec.lists[LRU_UNEVICTABLE]);
 		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
 		if (page_evictable(page, NULL))
 			goto retry;