Commit a0b02131 authored by Dave Chinner, committed by Al Viro

shrinker: Kill old ->shrink API.

There are no more users of this API, so kill it dead, dead, dead and
quietly bury the corpse in a shallow, unmarked grave in a dark forest deep
in the hills...

[glommer@openvz.org: added flowers to the grave]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 70534a73
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -7,14 +7,15 @@
  *
  * The 'gfpmask' refers to the allocation we are currently trying to
  * fulfil.
- *
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrink_control {
 	gfp_t gfp_mask;
 
-	/* How many slab objects shrinker() should scan and try to reclaim */
+	/*
+	 * How many objects scan_objects should scan and try to reclaim.
+	 * This is reset before every call, so it is safe for callees
+	 * to modify.
+	 */
 	unsigned long nr_to_scan;
 
 	/* shrink from these nodes */
@@ -27,11 +28,6 @@ struct shrink_control {
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
- * @shrink() should look through the least-recently-used 'nr_to_scan' entries
- * and attempt to free them up. It should return the number of objects which
- * remain in the cache. If it returns -1, it means it cannot do any scanning at
- * this time (eg. there is a risk of deadlock).
- *
  * @count_objects should return the number of freeable items in the cache. If
  * there are no objects to free or the number of freeable items cannot be
  * determined, it should return 0. No deadlock checks should be done during the
@@ -50,7 +46,6 @@ struct shrink_control {
  * @flags determine the shrinker abilities, like numa awareness
  */
 struct shrinker {
-	int (*shrink)(struct shrinker *, struct shrink_control *sc);
 	unsigned long (*count_objects)(struct shrinker *,
 				       struct shrink_control *sc);
 	unsigned long (*scan_objects)(struct shrinker *,
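For orientation, here is a minimal sketch of a shrinker written against the surviving two-callback API. The demo_* names, the atomic counter, and the trivial eviction stub are illustrative assumptions, not part of this commit; only the callback shapes, DEFAULT_SEEKS, and the register/unregister calls come from the real interface.

#include <linux/atomic.h>
#include <linux/shrinker.h>

static atomic_long_t demo_nr_cached;	/* hypothetical object counter */

/* Hypothetical eviction helper: pretend we freed everything asked for. */
static unsigned long demo_evict(unsigned long nr)
{
	return nr;
}

static unsigned long demo_count(struct shrinker *s,
				struct shrink_control *sc)
{
	/* Cheap, lock-free estimate; 0 means "nothing freeable now". */
	return atomic_long_read(&demo_nr_cached);
}

static unsigned long demo_scan(struct shrinker *s,
			       struct shrink_control *sc)
{
	/* Try to free up to sc->nr_to_scan objects in one batch. */
	unsigned long freed = demo_evict(sc->nr_to_scan);

	atomic_long_sub(freed, &demo_nr_cached);
	return freed;			/* number of objects freed */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init;
 * unregister_shrinker(&demo_shrinker) on teardown. */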
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -202,7 +202,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 	TP_fast_assign(
 		__entry->shr = shr;
-		__entry->shrink = shr->shrink;
+		__entry->shrink = shr->scan_objects;
 		__entry->nr_objects_to_shrink = nr_objects_to_shrink;
 		__entry->gfp_flags = sc->gfp_mask;
 		__entry->pgs_scanned = pgs_scanned;
@@ -241,7 +241,7 @@ TRACE_EVENT(mm_shrink_slab_end,
 	TP_fast_assign(
 		__entry->shr = shr;
-		__entry->shrink = shr->shrink;
+		__entry->shrink = shr->scan_objects;
 		__entry->unused_scan = unused_scan_cnt;
 		__entry->new_scan = new_scan_cnt;
 		__entry->retval = shrinker_retval;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -194,14 +194,6 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
-static inline int do_shrinker_shrink(struct shrinker *shrinker,
-				     struct shrink_control *sc,
-				     unsigned long nr_to_scan)
-{
-	sc->nr_to_scan = nr_to_scan;
-	return (*shrinker->shrink)(shrinker, sc);
-}
-
 #define SHRINK_BATCH 128
 
 static unsigned long
@@ -218,10 +210,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 
-	if (shrinker->count_objects)
-		max_pass = shrinker->count_objects(shrinker, shrinkctl);
-	else
-		max_pass = do_shrinker_shrink(shrinker, shrinkctl, 0);
+	max_pass = shrinker->count_objects(shrinker, shrinkctl);
 	if (max_pass == 0)
 		return 0;
 
@@ -240,7 +229,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	if (total_scan < 0) {
 		printk(KERN_ERR
 		"shrink_slab: %pF negative objects to delete nr=%ld\n",
-		       shrinker->shrink, total_scan);
+		       shrinker->scan_objects, total_scan);
 		total_scan = max_pass;
 	}
 
@@ -272,27 +261,13 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 				max_pass, delta, total_scan);
 
 	while (total_scan >= batch_size) {
-		if (shrinker->scan_objects) {
-			unsigned long ret;
-			shrinkctl->nr_to_scan = batch_size;
-			ret = shrinker->scan_objects(shrinker, shrinkctl);
-
-			if (ret == SHRINK_STOP)
-				break;
-			freed += ret;
-		} else {
-			int nr_before;
-			long ret;
-
-			nr_before = do_shrinker_shrink(shrinker, shrinkctl, 0);
-			ret = do_shrinker_shrink(shrinker, shrinkctl,
-							batch_size);
-			if (ret == -1)
-				break;
-			if (ret < nr_before)
-				freed += nr_before - ret;
-		}
+		unsigned long ret;
+
+		shrinkctl->nr_to_scan = batch_size;
+		ret = shrinker->scan_objects(shrinker, shrinkctl);
+		if (ret == SHRINK_STOP)
+			break;
+		freed += ret;
 
 		count_vm_events(SLABS_SCANNED, batch_size);
 		total_scan -= batch_size;
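Note the return-convention change this removal locks in: the old ->shrink() signalled "cannot scan now" with -1 and reported the number of objects remaining, whereas scan_objects() signals it with SHRINK_STOP and reports the number of objects freed. A hedged before/after sketch, with hypothetical my_* helpers standing in for a real cache:

/* Hypothetical cache helpers, assumed for illustration only. */
static unsigned long my_evict(unsigned long nr);	/* returns objects freed */
static int my_count(void);				/* returns objects cached */

/* Old style, removed by this commit: */
static int my_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return -1;		/* deadlock risk: refuse to scan */
	if (sc->nr_to_scan)		/* nr_to_scan == 0 meant "just count" */
		my_evict(sc->nr_to_scan);
	return my_count();		/* objects remaining */
}

/* New-style equivalent: */
static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;		/* replaces the old -1 */
	return my_evict(sc->nr_to_scan);	/* objects freed, not remaining */
}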