Commit 58c37f6e authored by KOSAKI Motohiro, committed by Linus Torvalds

vmscan: protect reading of reclaim_stat with lru_lock

Rik van Riel pointed out that reading reclaim_stat should be protected
by lru_lock, otherwise vmscan might sweep up to 2x as many pages.

This fault was introduced by

  commit 4f98a2fe
  Author: Rik van Riel <riel@redhat.com>
  Date:   Sat Oct 18 20:26:32 2008 -0700

    vmscan: split LRU lists into anon & file sets
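
To make the failure mode concrete, the sketch below is a minimal,
deterministic userspace illustration (illustrative names and numbers
only, not kernel code) of how pairing an unlocked read of
recent_scanned with a recent_rotated value that another CPU has
already halved can double the apparent reclaim pressure:

  #include <stdio.h>

  /* Stand-in for the anon half of struct zone_reclaim_stat. */
  struct stat_demo {
          unsigned long recent_scanned;
          unsigned long recent_rotated;
  };

  int main(void)
  {
          struct stat_demo s = { 4000, 2000 };

          /* Consistent snapshot, as a reader holding lru_lock sees it. */
          unsigned long locked_view = s.recent_scanned / s.recent_rotated;

          /*
           * Torn snapshot: simulate another CPU's decay step (both fields
           * halved under the lock) landing between our two unlocked loads,
           * so the old recent_scanned gets paired with the new
           * recent_rotated.  (The kernel's "+ 1" rounding fudge is
           * omitted here for clarity.)
           */
          unsigned long scanned_seen = s.recent_scanned;  /* first load: 4000 */
          s.recent_scanned /= 2;                          /* decay runs here */
          s.recent_rotated /= 2;
          unsigned long rotated_seen = s.recent_rotated;  /* second load: 1000 */
          unsigned long torn_view = scanned_seen / rotated_seen;

          printf("locked view: %lu, torn view: %lu (2x the pressure)\n",
                 locked_view, torn_view);
          return 0;
  }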
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 15748048
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1627,6 +1627,13 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		}
 	}
 
+	/*
+	 * With swappiness at 100, anonymous and file have the same priority.
+	 * This scanning priority is essentially the inverse of IO cost.
+	 */
+	anon_prio = sc->swappiness;
+	file_prio = 200 - sc->swappiness;
+
 	/*
 	 * OK, so we have swap space and a fair amount of page cache
 	 * pages.  We use the recently rotated / recently scanned
@@ -1638,27 +1645,17 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	 *
 	 * anon in [0], file in [1]
 	 */
+	spin_lock_irq(&zone->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
-		spin_lock_irq(&zone->lru_lock);
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
-		spin_unlock_irq(&zone->lru_lock);
 	}
 
 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
-		spin_lock_irq(&zone->lru_lock);
 		reclaim_stat->recent_scanned[1] /= 2;
 		reclaim_stat->recent_rotated[1] /= 2;
-		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	/*
-	 * With swappiness at 100, anonymous and file have the same priority.
-	 * This scanning priority is essentially the inverse of IO cost.
-	 */
-	anon_prio = sc->swappiness;
-	file_prio = 200 - sc->swappiness;
-
 	/*
 	 * The amount of pressure on anon vs file pages is inversely
 	 * proportional to the fraction of recently scanned pages on
@@ -1669,6 +1666,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
+	spin_unlock_irq(&zone->lru_lock);
 
 	fraction[0] = ap;
 	fraction[1] = fp;
...
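
After this patch, the decay checks and the ap/fp computation share a
single critical section, so every read of reclaim_stat sees a
consistent snapshot.  Below is a condensed userspace restatement of
the resulting control flow (simplified stand-in types, with a pthread
mutex standing in for zone->lru_lock; not the kernel source):

  #include <pthread.h>
  #include <stdio.h>

  struct zone_reclaim_stat {
          unsigned long recent_scanned[2];  /* anon in [0], file in [1] */
          unsigned long recent_rotated[2];
  };

  static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

  static void get_scan_fractions(struct zone_reclaim_stat *rs,
                                 unsigned long anon, unsigned long file,
                                 unsigned long swappiness,
                                 unsigned long fraction[2])
  {
          unsigned long anon_prio = swappiness;
          unsigned long file_prio = 200 - swappiness;
          unsigned long ap, fp;

          pthread_mutex_lock(&lru_lock);  /* one lock/unlock pair now */
          if (rs->recent_scanned[0] > anon / 4) {
                  rs->recent_scanned[0] /= 2;
                  rs->recent_rotated[0] /= 2;
          }
          if (rs->recent_scanned[1] > file / 4) {
                  rs->recent_scanned[1] /= 2;
                  rs->recent_rotated[1] /= 2;
          }
          ap = (anon_prio + 1) * (rs->recent_scanned[0] + 1);
          ap /= rs->recent_rotated[0] + 1;
          fp = (file_prio + 1) * (rs->recent_scanned[1] + 1);
          fp /= rs->recent_rotated[1] + 1;
          pthread_mutex_unlock(&lru_lock);

          fraction[0] = ap;
          fraction[1] = fp;
  }

  int main(void)
  {
          struct zone_reclaim_stat rs = { { 300, 900 }, { 100, 100 } };
          unsigned long fraction[2];

          get_scan_fractions(&rs, 1000, 4000, 60, fraction);
          printf("anon weight %lu, file weight %lu\n",
                 fraction[0], fraction[1]);
          return 0;
  }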