Commit b35ea17b authored by KOSAKI Motohiro's avatar KOSAKI Motohiro Committed by Linus Torvalds

mm: shrink_inactive_list() nr_scan accounting fix fix

If sc->isolate_pages() returns 0, we don't need to call shrink_page_list().
In the past, shrink_inactive_list() handled this properly.

But commit fb8d14e1 (a three-year-old commit!) broke it.  The current
shrink_inactive_list() always calls shrink_page_list() even when
isolate_pages() returns 0.

This patch restores the proper return value check.

Requirements:
  o The "nr_taken == 0" check must stay before the call to shrink_page_list().
  o The "nr_taken == 0" check must stay after the nr_scan-related statistics
    have been updated.

Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: default avatarKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: default avatarRik van Riel <riel@redhat.com>
Reviewed-by: default avatarMinchan Kim <minchan.kim@gmail.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 44c241f1
...@@ -1076,6 +1076,20 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1076,6 +1076,20 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
nr_taken = sc->isolate_pages(sc->swap_cluster_max, nr_taken = sc->isolate_pages(sc->swap_cluster_max,
&page_list, &nr_scan, sc->order, mode, &page_list, &nr_scan, sc->order, mode,
zone, sc->mem_cgroup, 0, file); zone, sc->mem_cgroup, 0, file);
if (scanning_global_lru(sc)) {
zone->pages_scanned += nr_scan;
if (current_is_kswapd())
__count_zone_vm_events(PGSCAN_KSWAPD, zone,
nr_scan);
else
__count_zone_vm_events(PGSCAN_DIRECT, zone,
nr_scan);
}
if (nr_taken == 0)
goto done;
nr_active = clear_active_flags(&page_list, count); nr_active = clear_active_flags(&page_list, count);
__count_vm_events(PGDEACTIVATE, nr_active); __count_vm_events(PGDEACTIVATE, nr_active);
...@@ -1088,8 +1102,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1088,8 +1102,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
__mod_zone_page_state(zone, NR_INACTIVE_ANON, __mod_zone_page_state(zone, NR_INACTIVE_ANON,
-count[LRU_INACTIVE_ANON]); -count[LRU_INACTIVE_ANON]);
if (scanning_global_lru(sc))
zone->pages_scanned += nr_scan;
reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
...@@ -1123,18 +1135,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1123,18 +1135,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
} }
nr_reclaimed += nr_freed; nr_reclaimed += nr_freed;
local_irq_disable(); local_irq_disable();
if (current_is_kswapd()) { if (current_is_kswapd())
__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
__count_vm_events(KSWAPD_STEAL, nr_freed); __count_vm_events(KSWAPD_STEAL, nr_freed);
} else if (scanning_global_lru(sc))
__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
__count_zone_vm_events(PGSTEAL, zone, nr_freed); __count_zone_vm_events(PGSTEAL, zone, nr_freed);
if (nr_taken == 0)
goto done;
spin_lock(&zone->lru_lock); spin_lock(&zone->lru_lock);
/* /*
* Put back any unfreeable pages. * Put back any unfreeable pages.
...@@ -1164,9 +1170,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1164,9 +1170,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
} }
} }
} while (nr_scanned < max_scan); } while (nr_scanned < max_scan);
spin_unlock(&zone->lru_lock);
done: done:
local_irq_enable(); spin_unlock_irq(&zone->lru_lock);
pagevec_release(&pvec); pagevec_release(&pvec);
return nr_reclaimed; return nr_reclaimed;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment