Commit 785b99fe authored by Mel Gorman, committed by Linus Torvalds

mm, vmscan: release/reacquire lru_lock on pgdat change

With node-lru, the LRU locking is based on the pgdat.  As Minchan pointed
out, there is an opportunity to reduce LRU lock release/acquire traffic in
check_move_unevictable_pages() by switching locks only when the pgdat
changes, as sketched below.

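The idea generalizes beyond vmscan: when walking a batch of items whose lock
domain can vary per item, hold the current lock across consecutive items in
the same domain and switch only at a domain boundary, instead of unlocking
and relocking for every item.  Below is a minimal userspace sketch of that
idiom using pthread mutexes; struct domain, struct item, and process_one()
are hypothetical stand-ins for the pgdat, the page, and the per-page work,
not kernel API.

    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical stand-ins: "domain" plays the role of the pgdat,
     * and each item carries a pointer to the domain whose lock
     * protects it. */
    struct domain {
            pthread_mutex_t lock;
    };

    struct item {
            struct domain *dom;
            /* ... payload ... */
    };

    static void process_one(struct item *it)
    {
            /* per-item work, done under it->dom->lock */
            (void)it;
    }

    /* Batch-process items, releasing/reacquiring the lock only when
     * consecutive items belong to different domains -- the same idiom
     * the commit applies to pgdat->lru_lock. */
    static void process_batch(struct item **items, int nr)
    {
            struct domain *locked = NULL;
            int i;

            for (i = 0; i < nr; i++) {
                    struct domain *dom = items[i]->dom;

                    if (dom != locked) {
                            if (locked)
                                    pthread_mutex_unlock(&locked->lock);
                            locked = dom;
                            pthread_mutex_lock(&locked->lock);
                    }
                    process_one(items[i]);
            }
            if (locked)
                    pthread_mutex_unlock(&locked->lock);
    }

When items cluster by domain, the lock is taken roughly once per domain
rather than once per item; in the worst case (alternating domains) this
degrades to the old per-item behaviour, so the change never costs more lock
operations than before.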
[mgorman@techsingularity.net: remove double initialisation]
  Link: http://lkml.kernel.org/r/20160719074835.GC10438@techsingularity.net
Link: http://lkml.kernel.org/r/1468853426-12858-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 22fecdf5
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3774,24 +3774,23 @@ int page_evictable(struct page *page)
 void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
-	struct zone *zone = NULL;
+	struct pglist_data *pgdat = NULL;
 	int pgscanned = 0;
 	int pgrescued = 0;
 	int i;
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = pages[i];
-		struct zone *pagezone;
+		struct pglist_data *pagepgdat = page_pgdat(page);
 
 		pgscanned++;
-		pagezone = page_zone(page);
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(zone_lru_lock(zone));
-			zone = pagezone;
-			spin_lock_irq(zone_lru_lock(zone));
+		if (pagepgdat != pgdat) {
+			if (pgdat)
+				spin_unlock_irq(&pgdat->lru_lock);
+			pgdat = pagepgdat;
+			spin_lock_irq(&pgdat->lru_lock);
 		}
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
@@ -3807,10 +3806,10 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		}
 	}
 
-	if (zone) {
+	if (pgdat) {
 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-		spin_unlock_irq(zone_lru_lock(zone));
+		spin_unlock_irq(&pgdat->lru_lock);
 	}
 }
 #endif /* CONFIG_SHMEM */