Commit af24516e authored by Andrea Arcangeli

mm: zone_reclaim: compaction: don't depend on kswapd to invoke reset_isolation_suitable

If kswapd never needs to run (only __GFP_NO_KSWAPD allocations and
plenty of free memory), compaction is crippled: it stops running for a
while after the free/isolation cursors meet. After that, allocations
can fail for a full compaction_deferred cycle, until
compaction_restarting finally resets it again.
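
For context, a minimal sketch of the deferral bookkeeping referred to
above, paraphrased from the compaction_deferred()/compaction_restarting()
logic in mm/compaction.c. struct zone_defer is a stand-in for the relevant
struct zone fields; the bodies are simplified, not the exact kernel code:

#include <stdbool.h>

/* Do not skip compaction more than 1 << 6 = 64 times in a row */
#define COMPACT_MAX_DEFER_SHIFT 6

struct zone_defer {                          /* stand-in for struct zone fields */
        unsigned int compact_considered;     /* attempts skipped since last failure */
        unsigned int compact_defer_shift;    /* log2 of the current defer window */
        int compact_order_failed;            /* lowest order that last failed */
};

/* True while allocations of this order should keep skipping compaction. */
static bool compaction_deferred(struct zone_defer *z, int order)
{
        unsigned int defer_limit = 1U << z->compact_defer_shift;

        if (order < z->compact_order_failed)
                return false;

        if (++z->compact_considered > defer_limit)   /* avoid overflow */
                z->compact_considered = defer_limit;

        return z->compact_considered < defer_limit;
}

/* True once the whole defer window has expired: time to retry and reset. */
static bool compaction_restarting(struct zone_defer *z, int order)
{
        if (order < z->compact_order_failed)
                return false;

        return z->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
               z->compact_considered >= (1U << z->compact_defer_shift);
}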

Stopping compaction for a full deferral cycle after the cursors meet,
even though it never failed and is not going to fail, doesn't make sense.

We already throttle compaction's CPU utilization using
defer_compaction. We shouldn't prevent compaction from running again
after each pass completes (when the cursors meet), unless it actually failed.

This makes direct compaction functional again. The throttling of
direct compaction is still controlled by the defer_compaction
logic.
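
For completeness, a similarly simplified sketch of the throttling side,
reusing the zone_defer stand-in from the sketch above: defer_compaction()
backs off after a failed pass, and compaction_defer_reset() (the call kept
in the page_alloc.c hunk below) re-arms compaction after a successful
allocation. Again a paraphrase, not the exact kernel code:

/* Called when a compaction pass failed to satisfy an order-'order' request. */
static void defer_compaction(struct zone_defer *z, int order)
{
        z->compact_considered = 0;
        if (z->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT)
                z->compact_defer_shift++;            /* double the defer window */
        if (order < z->compact_order_failed)
                z->compact_order_failed = order;
}

/* Called when an allocation succeeded after compaction: stop deferring. */
static void compaction_defer_reset(struct zone_defer *z, int order,
                                   bool alloc_success)
{
        if (alloc_success) {
                z->compact_considered = 0;
                z->compact_defer_shift = 0;
        }
        if (order >= z->compact_order_failed)
                z->compact_order_failed = order + 1;
}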

kswapd still won't risk resetting compaction, and will instead wait for
direct compaction to do so. It is not clear whether this is ideal, but it
at least decreases the risk of kswapd doing too much work. kswapd will only
run one pass of compaction until some allocation invokes compaction again.
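
Concretely, with this patch applied the tail of __compact_finished()
(reconstructed from the mm/compaction.c hunk below; the surrounding lines
are paraphrased from the code of that era) behaves as described:

        /* A full pass is done once the migrate and free scanners meet. */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /* Let the next compaction pass start anew. */
                zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
                zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                zone->compact_cached_free_pfn = zone_end_pfn(zone);

                /*
                 * Clear the PG_migrate_skip information right away for
                 * direct compaction; kswapd leaves the reset to a later
                 * allocation request, so it only runs a single pass.
                 */
                if (!current_is_kswapd())
                        __reset_isolation_suitable(zone);

                return COMPACT_COMPLETE;
        }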

This decreased reliability of compaction was introduced in commit
62997027.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
parent 02eaa78b
@@ -41,7 +41,6 @@ extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                        int alloc_flags, const struct alloc_context *ac,
                        enum migrate_mode mode, int *contended);
 extern void compact_pgdat(pg_data_t *pgdat, int order);
-extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order,
                        int alloc_flags, int classzone_idx);
@@ -64,10 +63,6 @@ static inline void compact_pgdat(pg_data_t *pgdat, int order)
 {
 }
-static inline void reset_isolation_suitable(pg_data_t *pgdat)
-{
-}
 static inline unsigned long compaction_suitable(struct zone *zone, int order,
                        int alloc_flags, int classzone_idx)
 {
......
@@ -519,11 +519,6 @@ struct zone {
        int                     compact_order_failed;
 #endif
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-       /* Set to true when the PG_migrate_skip bits should be cleared */
-       bool                    compact_blockskip_flush;
-#endif
        ZONE_PADDING(_pad3_)
        /* Zone statistics */
        atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
......
@@ -221,7 +221,6 @@ static void __reset_isolation_suitable(struct zone *zone)
        zone->compact_cached_migrate_pfn[0] = start_pfn;
        zone->compact_cached_migrate_pfn[1] = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
-       zone->compact_blockskip_flush = false;
        /* Walk the zone and mark every pageblock as suitable for isolation */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
@@ -240,7 +239,7 @@ static void __reset_isolation_suitable(struct zone *zone)
        }
 }
-void reset_isolation_suitable(pg_data_t *pgdat)
+static void reset_isolation_suitable(pg_data_t *pgdat)
 {
        int zoneid;
@@ -250,8 +249,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
                        continue;
                /* Only flush if a full compaction finished recently */
-               if (zone->compact_blockskip_flush)
-                       __reset_isolation_suitable(zone);
+               __reset_isolation_suitable(zone);
        }
 }
@@ -1154,13 +1152,12 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
                zone->compact_cached_free_pfn = zone_end_pfn(zone);
                /*
-                * Mark that the PG_migrate_skip information should be cleared
-                * by kswapd when it goes to sleep. kswapd does not set the
-                * flag itself as the decision to be clear should be directly
-                * based on an allocation request.
+                * Clear the PG_migrate_skip information. kswapd does
+                * not clear it as the decision to be clear should be
+                * directly based on an allocation request.
                 */
                if (!current_is_kswapd())
-                       zone->compact_blockskip_flush = true;
+                       __reset_isolation_suitable(zone);
                return COMPACT_COMPLETE;
        }
......
@@ -2505,7 +2505,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        if (page) {
                struct zone *zone = page_zone(page);
-               zone->compact_blockskip_flush = false;
                compaction_defer_reset(zone, order, true);
                count_vm_event(COMPACTSUCCESS);
                return page;
......
@@ -3314,14 +3314,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
                 */
                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
-               /*
-                * Compaction records what page blocks it recently failed to
-                * isolate pages from and skips them in the future scanning.
-                * When kswapd is going to sleep, it is reasonable to assume
-                * that pages and compaction may succeed so reset the cache.
-                */
-               reset_isolation_suitable(pgdat);
                if (!kthread_should_stop())
                        schedule();
......