Commit 9083905a authored by Johannes Weiner, committed by Linus Torvalds

mm: page_alloc: inline should_alloc_retry()

The should_alloc_retry() function was meant to encapsulate retry
conditions of the allocator slowpath, but there are still checks
remaining in the main function, and much of how the retrying is
performed also depends on the OOM killer progress.  The physical
separation of those conditions make the code hard to follow.

Inline the should_alloc_retry() checks.  Notes:

- The __GFP_NOFAIL check is already done in __alloc_pages_may_oom(),
  replace it with looping on OOM killer progress

- The pm_suspended_storage() check is meant to skip the OOM killer
  when reclaim has no IO available, move to __alloc_pages_may_oom()

- The order <= PAGE_ALLOC_COSTLY_ORDER check is re-united with its original
  counterpart of checking whether reclaim actually made any progress
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dc56401f
...@@ -2309,48 +2309,6 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) ...@@ -2309,48 +2309,6 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
show_mem(filter); show_mem(filter);
} }
/*
 * should_alloc_retry - decide whether the allocator slowpath should retry
 * @gfp_mask: GFP flags of the allocation request
 * @order: allocation order (request is for 2^order contiguous pages)
 * @did_some_progress: pages freed by the most recent direct-reclaim pass
 * @pages_reclaimed: cumulative pages reclaimed over all retries so far
 *
 * Returns 1 if the slowpath should loop and try the allocation again,
 * 0 if it should give up.
 *
 * NOTE(review): this is the function being REMOVED by this commit; its
 * checks are inlined into __alloc_pages_slowpath() and
 * __alloc_pages_may_oom() (see the hunks below).
 */
static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
unsigned long did_some_progress,
unsigned long pages_reclaimed)
{
/* Do not loop if specifically requested */
if (gfp_mask & __GFP_NORETRY)
return 0;
/* Always retry if specifically requested */
if (gfp_mask & __GFP_NOFAIL)
return 1;
/*
 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
 * making forward progress without invoking OOM. Suspend also disables
 * storage devices so kswapd will not help. Bail if we are suspending.
 */
if (!did_some_progress && pm_suspended_storage())
return 0;
/*
 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
 * means __GFP_NOFAIL, but that may not be true in other
 * implementations.
 */
/* Small-order requests retry indefinitely, like implicit __GFP_NOFAIL */
if (order <= PAGE_ALLOC_COSTLY_ORDER)
return 1;
/*
 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
 * specified, then we retry until we no longer reclaim any pages
 * (above), or we've reclaimed an order of pages at least as
 * large as the allocation's order. In both cases, if the
 * allocation still fails, we stop retrying.
 */
if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
return 1;
/* Costly order without __GFP_REPEAT (or enough reclaimed): give up */
return 0;
}
static inline struct page * static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
const struct alloc_context *ac, unsigned long *did_some_progress) const struct alloc_context *ac, unsigned long *did_some_progress)
...@@ -2389,16 +2347,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, ...@@ -2389,16 +2347,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
/* The OOM killer does not needlessly kill tasks for lowmem */ /* The OOM killer does not needlessly kill tasks for lowmem */
if (ac->high_zoneidx < ZONE_NORMAL) if (ac->high_zoneidx < ZONE_NORMAL)
goto out; goto out;
/* The OOM killer does not compensate for light reclaim */ /* The OOM killer does not compensate for IO-less reclaim */
if (!(gfp_mask & __GFP_FS)) { if (!(gfp_mask & __GFP_FS)) {
/* /*
* XXX: Page reclaim didn't yield anything, * XXX: Page reclaim didn't yield anything,
* and the OOM killer can't be invoked, but * and the OOM killer can't be invoked, but
* keep looping as per should_alloc_retry(). * keep looping as per tradition.
*/ */
*did_some_progress = 1; *did_some_progress = 1;
goto out; goto out;
} }
if (pm_suspended_storage())
goto out;
/* The OOM killer may not free memory on a specific node */ /* The OOM killer may not free memory on a specific node */
if (gfp_mask & __GFP_THISNODE) if (gfp_mask & __GFP_THISNODE)
goto out; goto out;
...@@ -2781,40 +2741,40 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, ...@@ -2781,40 +2741,40 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (page) if (page)
goto got_pg; goto got_pg;
/* Check if we should retry the allocation */ /* Do not loop if specifically requested */
if (gfp_mask & __GFP_NORETRY)
goto noretry;
/* Keep reclaiming pages as long as there is reasonable progress */
pages_reclaimed += did_some_progress; pages_reclaimed += did_some_progress;
if (should_alloc_retry(gfp_mask, order, did_some_progress, if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
pages_reclaimed)) { ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
/*
* If we fail to make progress by freeing individual
* pages, but the allocation wants us to keep going,
* start OOM killing tasks.
*/
if (!did_some_progress) {
page = __alloc_pages_may_oom(gfp_mask, order, ac,
&did_some_progress);
if (page)
goto got_pg;
if (!did_some_progress)
goto nopage;
}
/* Wait for some write requests to complete then retry */ /* Wait for some write requests to complete then retry */
wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50); wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
goto retry; goto retry;
} else { }
/* Reclaim has failed us, start killing things */
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
if (page)
goto got_pg;
/* Retry as long as the OOM killer is making progress */
if (did_some_progress)
goto retry;
noretry:
/* /*
* High-order allocations do not necessarily loop after * High-order allocations do not necessarily loop after
* direct reclaim and reclaim/compaction depends on compaction * direct reclaim and reclaim/compaction depends on compaction
* being called after reclaim so call directly if necessary * being called after reclaim so call directly if necessary
*/ */
page = __alloc_pages_direct_compact(gfp_mask, order, page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
alloc_flags, ac, migration_mode, ac, migration_mode,
&contended_compaction, &contended_compaction,
&deferred_compaction); &deferred_compaction);
if (page) if (page)
goto got_pg; goto got_pg;
}
nopage: nopage:
warn_alloc_failed(gfp_mask, order, NULL); warn_alloc_failed(gfp_mask, order, NULL);
got_pg: got_pg:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment