Commit 7db8889a authored by Rik van Riel, committed by Linus Torvalds

mm: have order > 0 compaction start off where it left

Order > 0 compaction stops when enough free pages of the correct page
order have been coalesced.  When doing subsequent higher order
allocations, it is possible for compaction to be invoked many times.

However, the compaction code always starts out looking for things to
compact at the start of the zone, and for free pages to compact things to
at the end of the zone.

This can cause quadratic behaviour, with isolate_freepages starting at the
end of the zone each time, even though previous invocations of the
compaction code already filled up all free memory on that end of the zone.

This can cause isolate_freepages to take enormous amounts of CPU with
certain workloads on larger memory systems.

The obvious solution is to have isolate_freepages remember where it left
off last time, and continue at that point the next time it gets invoked
for an order > 0 compaction.  This could cause compaction to fail if
cc->free_pfn and cc->migrate_pfn are close together initially; in that
case we restart from the end of the zone and try once more.

Forced full (order == -1) compactions are left alone.
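To make the restart behaviour concrete, here is a minimal userspace sketch of
the new control flow, condensing the compact_zone()/compact_finished() changes
in the patch below. The zone_model/cc_model structs, PAGEBLOCK_NR_PAGES value
and the main() driver are illustrative stand-ins, not the kernel's real types.

/*
 * Minimal userspace model of the incremental-compaction restart logic.
 * The struct names, PAGEBLOCK_NR_PAGES and main() below are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* stand-in for pageblock_nr_pages */

struct zone_model {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	unsigned long compact_cached_free_pfn;	/* where the last run stopped */
};

struct cc_model {
	unsigned long free_pfn;		/* free scanner, walks down the zone */
	unsigned long start_free_pfn;	/* where this run's free scanner began */
	unsigned long migrate_pfn;	/* migrate scanner, walks up the zone */
	bool wrapped;			/* already restarted from the zone end? */
	int order;			/* > 0 means incremental compaction */
};

/* Last pageblock-aligned pfn of the zone: the full-compaction start point. */
static unsigned long start_free_pfn(struct zone_model *zone)
{
	unsigned long free_pfn = zone->zone_start_pfn + zone->spanned_pages;
	return free_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

/* Condensed version of the compact_finished() change in this patch. */
static bool compaction_finished(struct zone_model *zone, struct cc_model *cc)
{
	if (cc->free_pfn <= cc->migrate_pfn) {
		if (cc->order > 0 && !cc->wrapped) {
			/* Started partway through; restart at the zone end. */
			unsigned long free_pfn = start_free_pfn(zone);
			zone->compact_cached_free_pfn = free_pfn;
			cc->free_pfn = free_pfn;
			cc->wrapped = true;
			return false;	/* COMPACT_CONTINUE */
		}
		return true;		/* COMPACT_COMPLETE */
	}

	/* We wrapped around and ended up where we started. */
	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
		return true;

	return false;
}

int main(void)
{
	struct zone_model zone = { .zone_start_pfn = 0, .spanned_pages = 1UL << 20 };
	struct cc_model cc = { .order = 3 };

	/* Pretend an earlier order > 0 run left the free scanner mid-zone. */
	zone.compact_cached_free_pfn = start_free_pfn(&zone) / 2;

	/* compact_zone() setup for order > 0: resume at the cached pfn. */
	cc.migrate_pfn = zone.zone_start_pfn;
	cc.free_pfn = zone.compact_cached_free_pfn;
	cc.start_free_pfn = cc.free_pfn;

	/* The scanners meet before the whole zone was covered: wrap once. */
	cc.migrate_pfn = cc.free_pfn;
	printf("done? %d (expect 0: restart from the zone end)\n",
	       compaction_finished(&zone, &cc));

	/* The free scanner walks back down to where this run began: stop. */
	cc.migrate_pfn = zone.zone_start_pfn;
	cc.free_pfn = cc.start_free_pfn;
	printf("done? %d (expect 1: complete)\n",
	       compaction_finished(&zone, &cc));
	return 0;
}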

[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: s/laste/last/, use 80 cols]
Signed-off-by: Rik van Riel <riel@redhat.com>
Reported-by: Jim Schutt <jaschut@sandia.gov>
Tested-by: Jim Schutt <jaschut@sandia.gov>
Cc: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ab215884
include/linux/mmzone.h

@@ -368,6 +368,10 @@ struct zone {
 	 */
 	spinlock_t		lock;
 	int			all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+	/* pfn where the last incremental compaction isolated free pages */
+	unsigned long		compact_cached_free_pfn;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
mm/compaction.c

@@ -422,6 +422,17 @@ static void isolate_freepages(struct zone *zone,
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;

+		/*
+		 * Skip ahead if another thread is compacting in the area
+		 * simultaneously. If we wrapped around, we can only skip
+		 * ahead if zone->compact_cached_free_pfn also wrapped to
+		 * above our starting point.
+		 */
+		if (cc->order > 0 && (!cc->wrapped ||
+				      zone->compact_cached_free_pfn >
+				      cc->start_free_pfn))
+			pfn = min(pfn, zone->compact_cached_free_pfn);
+
 		if (!pfn_valid(pfn))
 			continue;
@@ -461,8 +472,11 @@ static void isolate_freepages(struct zone *zone,
 		 * looking for free pages, the search will restart here as
 		 * page migration may have returned some pages to the allocator
 		 */
-		if (isolated)
+		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
+			if (cc->order > 0)
+				zone->compact_cached_free_pfn = high_pfn;
+		}
 	}

 	/* split_free_page does not map the pages */
@@ -556,6 +570,20 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	return ISOLATE_SUCCESS;
 }

+/*
+ * Returns the start pfn of the last page block in a zone. This is the starting
+ * point for full compaction of a zone. Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+	unsigned long free_pfn;
+	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	free_pfn &= ~(pageblock_nr_pages-1);
+	return free_pfn;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -565,8 +593,26 @@ static int compact_finished(struct zone *zone,
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;

-	/* Compaction run completes if the migrate and free scanner meet */
-	if (cc->free_pfn <= cc->migrate_pfn)
+	/*
+	 * A full (order == -1) compaction run starts at the beginning and
+	 * end of a zone; it completes when the migrate and free scanner meet.
+	 * A partial (order > 0) compaction can start with the free scanner
+	 * at a random point in the zone, and may have to restart.
+	 */
+	if (cc->free_pfn <= cc->migrate_pfn) {
+		if (cc->order > 0 && !cc->wrapped) {
+			/* We started partway through; restart at the end. */
+			unsigned long free_pfn = start_free_pfn(zone);
+			zone->compact_cached_free_pfn = free_pfn;
+			cc->free_pfn = free_pfn;
+			cc->wrapped = 1;
+			return COMPACT_CONTINUE;
+		}
+		return COMPACT_COMPLETE;
+	}
+
+	/* We wrapped around and ended up where we started. */
+	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
 		return COMPACT_COMPLETE;

 	/*
@@ -664,8 +710,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
-	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-	cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+	if (cc->order > 0) {
+		/* Incremental compaction. Start where the last one stopped. */
+		cc->free_pfn = zone->compact_cached_free_pfn;
+		cc->start_free_pfn = cc->free_pfn;
+	} else {
+		/* Order == -1 starts at the end of the zone. */
+		cc->free_pfn = start_free_pfn(zone);
+	}

 	migrate_prep_local();
mm/internal.h

@@ -118,8 +118,14 @@ struct compact_control {
 	unsigned long nr_freepages;	/* Number of isolated free pages */
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long start_free_pfn;	/* where we started the search */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
 	bool sync;			/* Synchronous migration */
+	bool wrapped;			/* Order > 0 compactions are
+					   incremental, once free_pfn
+					   and migrate_pfn meet, we restart
+					   from the top of the zone;
+					   remember we wrapped around. */

 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
mm/page_alloc.c

@@ -4397,6 +4397,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+		zone->compact_cached_free_pfn = zone->zone_start_pfn +
+						zone->spanned_pages;
+		zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1);
+#endif
 #ifdef CONFIG_NUMA
 		zone->node = nid;
 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)