Commit d5bc5fd3 authored by Vladimir Davydov's avatar Vladimir Davydov Committed by Linus Torvalds

mm: vmscan: shrink_slab: rename max_pass -> freeable

The name `max_pass' is misleading, because this variable actually keeps
the estimated number of freeable objects, not the maximal number of
objects we can scan in this pass, which can be twice that.  Rename it to
reflect its actual meaning.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8382d914
...@@ -224,15 +224,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, ...@@ -224,15 +224,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
unsigned long freed = 0; unsigned long freed = 0;
unsigned long long delta; unsigned long long delta;
long total_scan; long total_scan;
long max_pass; long freeable;
long nr; long nr;
long new_nr; long new_nr;
int nid = shrinkctl->nid; int nid = shrinkctl->nid;
long batch_size = shrinker->batch ? shrinker->batch long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH; : SHRINK_BATCH;
max_pass = shrinker->count_objects(shrinker, shrinkctl); freeable = shrinker->count_objects(shrinker, shrinkctl);
if (max_pass == 0) if (freeable == 0)
return 0; return 0;
/* /*
...@@ -244,14 +244,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, ...@@ -244,14 +244,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
total_scan = nr; total_scan = nr;
delta = (4 * nr_pages_scanned) / shrinker->seeks; delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass; delta *= freeable;
do_div(delta, lru_pages + 1); do_div(delta, lru_pages + 1);
total_scan += delta; total_scan += delta;
if (total_scan < 0) { if (total_scan < 0) {
printk(KERN_ERR printk(KERN_ERR
"shrink_slab: %pF negative objects to delete nr=%ld\n", "shrink_slab: %pF negative objects to delete nr=%ld\n",
shrinker->scan_objects, total_scan); shrinker->scan_objects, total_scan);
total_scan = max_pass; total_scan = freeable;
} }
/* /*
...@@ -260,26 +260,26 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, ...@@ -260,26 +260,26 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
* shrinkers to return -1 all the time. This results in a large * shrinkers to return -1 all the time. This results in a large
* nr being built up so when a shrink that can do some work * nr being built up so when a shrink that can do some work
* comes along it empties the entire cache due to nr >>> * comes along it empties the entire cache due to nr >>>
* max_pass. This is bad for sustaining a working set in * freeable. This is bad for sustaining a working set in
* memory. * memory.
* *
* Hence only allow the shrinker to scan the entire cache when * Hence only allow the shrinker to scan the entire cache when
* a large delta change is calculated directly. * a large delta change is calculated directly.
*/ */
if (delta < max_pass / 4) if (delta < freeable / 4)
total_scan = min(total_scan, max_pass / 2); total_scan = min(total_scan, freeable / 2);
/* /*
* Avoid risking looping forever due to too large nr value: * Avoid risking looping forever due to too large nr value:
* never try to free more than twice the estimate number of * never try to free more than twice the estimate number of
* freeable entries. * freeable entries.
*/ */
if (total_scan > max_pass * 2) if (total_scan > freeable * 2)
total_scan = max_pass * 2; total_scan = freeable * 2;
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
nr_pages_scanned, lru_pages, nr_pages_scanned, lru_pages,
max_pass, delta, total_scan); freeable, delta, total_scan);
/* /*
* Normally, we should not scan less than batch_size objects in one * Normally, we should not scan less than batch_size objects in one
...@@ -292,12 +292,12 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, ...@@ -292,12 +292,12 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
* *
* We detect the "tight on memory" situations by looking at the total * We detect the "tight on memory" situations by looking at the total
* number of objects we want to scan (total_scan). If it is greater * number of objects we want to scan (total_scan). If it is greater
* than the total number of objects on slab (max_pass), we must be * than the total number of objects on slab (freeable), we must be
* scanning at high prio and therefore should try to reclaim as much as * scanning at high prio and therefore should try to reclaim as much as
* possible. * possible.
*/ */
while (total_scan >= batch_size || while (total_scan >= batch_size ||
total_scan >= max_pass) { total_scan >= freeable) {
unsigned long ret; unsigned long ret;
unsigned long nr_to_scan = min(batch_size, total_scan); unsigned long nr_to_scan = min(batch_size, total_scan);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment