Commit 3c710c1a authored by Michal Hocko, committed by Linus Torvalds

mm, vmscan: extract shrink_page_list reclaim counters into a struct

shrink_page_list() returns quite a few counters to its caller.  Extract
the existing five into a struct reclaim_stat, because this makes the
code easier to follow and also allows further counters to be returned.

While we are at it, make all of them unsigned rather than unsigned long,
as we do not really need a full 64 bits for them (we never scan more than
SWAP_CLUSTER_MAX pages at once).  This should save some stack space.

This patch shouldn't introduce any functional change.
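
For orientation, the caller-side effect is roughly the following (a simplified
sketch extracted from the diff below, not a complete function; the surrounding
locking and accounting in shrink_inactive_list() are omitted):

	/* before: five separate output parameters */
	unsigned long nr_dirty = 0, nr_unqueued_dirty = 0, nr_congested = 0;
	unsigned long nr_writeback = 0, nr_immediate = 0;

	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
			&nr_dirty, &nr_unqueued_dirty, &nr_congested,
			&nr_writeback, &nr_immediate, false);

	/* after: one struct collects the counters, and callers that do not
	 * care (e.g. reclaim_clean_pages_from_list) simply pass NULL */
	struct reclaim_stat stat = {};

	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
			&stat, false);

	if (stat.nr_writeback && stat.nr_writeback == nr_taken)
		set_bit(PGDAT_WRITEBACK, &pgdat->flags);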

Link: http://lkml.kernel.org/r/20170104101942.4860-6-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 32b3f297
mm/vmscan.c

@@ -912,6 +912,14 @@ static void page_check_dirty_writeback(struct page *page,
 	mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
 }
 
+struct reclaim_stat {
+	unsigned nr_dirty;
+	unsigned nr_unqueued_dirty;
+	unsigned nr_congested;
+	unsigned nr_writeback;
+	unsigned nr_immediate;
+};
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -919,22 +927,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct pglist_data *pgdat,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_unqueued_dirty,
-				      unsigned long *ret_nr_congested,
-				      unsigned long *ret_nr_writeback,
-				      unsigned long *ret_nr_immediate,
+				      struct reclaim_stat *stat,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_reclaimed = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	unsigned nr_unqueued_dirty = 0;
+	unsigned nr_dirty = 0;
+	unsigned nr_congested = 0;
+	unsigned nr_reclaimed = 0;
+	unsigned nr_writeback = 0;
+	unsigned nr_immediate = 0;
 
 	cond_resched();
@@ -1276,11 +1280,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 
-	*ret_nr_dirty += nr_dirty;
-	*ret_nr_congested += nr_congested;
-	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
-	*ret_nr_writeback += nr_writeback;
-	*ret_nr_immediate += nr_immediate;
+	if (stat) {
+		stat->nr_dirty = nr_dirty;
+		stat->nr_congested = nr_congested;
+		stat->nr_unqueued_dirty = nr_unqueued_dirty;
+		stat->nr_writeback = nr_writeback;
+		stat->nr_immediate = nr_immediate;
+	}
 	return nr_reclaimed;
 }
@@ -1292,7 +1298,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+	unsigned long ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
@@ -1305,8 +1311,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_UNMAP|TTU_IGNORE_ACCESS,
-			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+			TTU_UNMAP|TTU_IGNORE_ACCESS, NULL, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1705,11 +1710,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	struct reclaim_stat stat = {};
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -1754,9 +1755,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
-				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
-				&nr_writeback, &nr_immediate,
-				false);
+				&stat, false);
 
 	spin_lock_irq(&pgdat->lru_lock);
@@ -1790,7 +1789,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * of pages under pages flagged for immediate reclaim and stall if any
 	 * are encountered in the nr_immediate check below.
 	 */
-	if (nr_writeback && nr_writeback == nr_taken)
+	if (stat.nr_writeback && stat.nr_writeback == nr_taken)
 		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
 	/*
@@ -1802,7 +1801,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * Tag a zone as congested if all the dirty pages scanned were
 	 * backed by a congested BDI and wait_iff_congested will stall.
 	 */
-	if (nr_dirty && nr_dirty == nr_congested)
+	if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
 		set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
 	/*
@@ -1811,7 +1810,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
 	 * reclaim context.
 	 */
-	if (nr_unqueued_dirty == nr_taken)
+	if (stat.nr_unqueued_dirty == nr_taken)
 		set_bit(PGDAT_DIRTY, &pgdat->flags);
 
 	/*
@@ -1820,7 +1819,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * that pages are cycling through the LRU faster than
 	 * they are written so also forcibly stall.
 	 */
-	if (nr_immediate && current_may_throttle())
+	if (stat.nr_immediate && current_may_throttle())
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 }
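
As a usage note on the "further counters" point in the description: with the
struct in place, adding a counter touches only the struct, the accounting
inside shrink_page_list(), and the one caller that consumes it; the prototype
and the NULL-passing callers stay untouched.  A hypothetical sketch
(nr_ref_keep is an invented name for illustration, not part of this patch):

	struct reclaim_stat {
		unsigned nr_dirty;
		unsigned nr_unqueued_dirty;
		unsigned nr_congested;
		unsigned nr_writeback;
		unsigned nr_immediate;
		unsigned nr_ref_keep;	/* hypothetical: pages kept due to references */
	};

	/* inside shrink_page_list(), next to the existing local counters */
	unsigned nr_ref_keep = 0;
	...
	nr_ref_keep++;			/* wherever the new event is detected */
	...
	if (stat) {
		...
		stat->nr_ref_keep = nr_ref_keep;
	}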