Commit 29fac03b authored by Minchan Kim, committed by Linus Torvalds

mm: make unreserve highatomic functions reliable

Currently, unreserve_highatomic_pageblock bails out as soon as it finds a
highatomic pageblock, regardless of whether any free pages were actually
moved out of it.  This undermines the unreserve logic's goal, which is to
save a process from OOM.

This patch makes the unreserve functions bail out only once some pages have
actually been moved to the !highatomic free list, avoiding such false
positives.
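
For illustration, a minimal, self-contained C sketch (a toy model, not the
kernel code: the pageblock array, counts and helper names are invented)
contrasting the old behaviour, which bails out after the first highatomic
candidate it finds, with the patched behaviour, which keeps scanning until
pages are actually moved:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: each "pageblock" holds some number of movable free pages. */
static int free_pages_in_block[] = { 0, 0, 7 };	/* first candidates are empty */
static const int nr_blocks = 3;

/* Old logic: bail out after the first candidate, even if nothing moved. */
static bool unreserve_old(void)
{
	for (int i = 0; i < nr_blocks; i++) {
		int moved = free_pages_in_block[i];	/* stands in for move_freepages_block() */
		free_pages_in_block[i] = 0;
		return moved != 0;			/* false positive when moved == 0 */
	}
	return false;
}

/* Patched logic: keep scanning until some pages were really moved. */
static bool unreserve_new(void)
{
	for (int i = 0; i < nr_blocks; i++) {
		int moved = free_pages_in_block[i];
		free_pages_in_block[i] = 0;
		if (moved)
			return true;			/* report success only when work was done */
	}
	return false;
}

int main(void)
{
	printf("old: %d\n", unreserve_old());		/* 0: gave up although a later block had pages */
	free_pages_in_block[2] = 7;			/* reset the toy state */
	printf("new: %d\n", unreserve_new());		/* 1: pages were actually unreserved */
	return 0;
}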

Another potential problem is that, due to a race between page freeing and
reserve_highatomic_pageblock, pages can sit on the highatomic free list even
though the pageblock's migratetype is no longer highatomic.  In that case,
unreserve_highatomic_pageblock becomes a no-op whenever the highatomic
reserve count is less than pageblock_nr_pages.  This can be solved simply by
draining all reserved pages before declaring OOM, which acts as a safeguard
to exhaust the reserves before converging on OOM.
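
To make that scenario concrete, a toy C model (all structures, names and
numbers are invented for illustration, not the kernel's) of a zone whose
reserve counter has dropped below one pageblock's worth while pages still
sit on the highatomic free list: without force the zone is skipped, with
force the stranded pages are still drained:

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512	/* e.g. 4K pages, 2M pageblocks; value is illustrative */

struct toy_zone {
	unsigned long nr_reserved_highatomic;	/* accounted highatomic reserve */
	unsigned long highatomic_free;		/* pages actually on the highatomic free list */
};

/* Toy version of the check at the top of unreserve_highatomic_pageblock(). */
static bool try_unreserve(struct toy_zone *zone, bool force)
{
	/* Preserve at least one pageblock unless memory pressure is really high. */
	if (!force && zone->nr_reserved_highatomic <= PAGEBLOCK_NR_PAGES)
		return false;			/* zone skipped, stranded pages stay put */

	if (!zone->highatomic_free)
		return false;

	zone->highatomic_free = 0;		/* "move" the stranded pages back to a usable list */
	return true;
}

int main(void)
{
	/* Racy state: the counter says less than one pageblock, but pages remain. */
	struct toy_zone zone = { .nr_reserved_highatomic = 100, .highatomic_free = 100 };

	printf("force=false -> %d\n", try_unreserve(&zone, false));	/* 0: no-op */
	printf("force=true  -> %d\n", try_unreserve(&zone, true));	/* 1: reserves drained before OOM */
	return 0;
}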

Link: http://lkml.kernel.org/r/1476259429-18279-5-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sangseok Lee <sangseok.lee@lge.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 04c8716f
@@ -2058,8 +2058,12 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
  * potentially hurts the reliability of high-order allocations when under
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve a pageblock even though highatomic
+ * pageblock is exhausted.
  */
-static bool unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+						bool force)
 {
 	struct zonelist *zonelist = ac->zonelist;
 	unsigned long flags;
@@ -2071,8 +2075,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac)
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
 								ac->nodemask) {
-		/* Preserve at least one pageblock */
-		if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+		/*
+		 * Preserve at least one pageblock unless memory pressure
+		 * is really high.
+		 */
+		if (!force && zone->nr_reserved_highatomic <=
+					pageblock_nr_pages)
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
@@ -2117,8 +2125,10 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac)
 			 */
 			set_pageblock_migratetype(page, ac->migratetype);
 			ret = move_freepages_block(zone, page, ac->migratetype);
-			spin_unlock_irqrestore(&zone->lock, flags);
-			return ret;
+			if (ret) {
+				spin_unlock_irqrestore(&zone->lock, flags);
+				return ret;
+			}
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -3322,7 +3332,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	 * Shrink them them and try again
 	 */
 	if (!page && !drained) {
-		unreserve_highatomic_pageblock(ac);
+		unreserve_highatomic_pageblock(ac, false);
 		drain_all_pages(NULL);
 		drained = true;
 		goto retry;
@@ -3441,7 +3451,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 	 */
 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
 		/* Before OOM, exhaust highatomic_reserve */
-		return unreserve_highatomic_pageblock(ac);
+		return unreserve_highatomic_pageblock(ac, true);
 	}
 
 	/*
...