Commit 2af120bc authored by Laura Abbott's avatar Laura Abbott Committed by Linus Torvalds

mm/compaction: break out of loop on !PageBuddy in isolate_freepages_block

We received several reports of bad page state when freeing CMA pages
previously allocated with alloc_contig_range:

    BUG: Bad page state in process Binder_A  pfn:63202
    page:d21130b0 count:0 mapcount:1 mapping:  (null) index:0x7dfbf
    page flags: 0x40080068(uptodate|lru|active|swapbacked)

Based on the page state, it looks like the page was still in use.  The
page flags do not make sense for the use case though.  Further debugging
showed that despite alloc_contig_range returning success, at least one
page in the range still remained in the buddy allocator.

There is an issue with isolate_freepages_block.  In strict mode (which
CMA uses), if any pages in the range cannot be isolated,
isolate_freepages_block should return failure (0).  The current check
keeps track of the total number of isolated pages and compares against
the size of the range:

        if (strict && nr_strict_required > total_isolated)
                total_isolated = 0;

After taking the zone lock, if one of the pages in the range is not in
the buddy allocator, we continue through the loop and do not increment
total_isolated.  If in the last iteration of the loop we isolate more
than one page (e.g.  last page needed is a higher order page), the check
for total_isolated may pass and we fail to detect that a page was
skipped.  The fix is to bail out of the loop immediately if we are in
strict mode.  There's no benefit to continuing anyway since we need all
pages to be isolated.  Additionally, drop the error checking based on
nr_strict_required and just check the pfn ranges.  This matches with
what isolate_freepages_range does.
Signed-off-by: default avatarLaura Abbott <lauraa@codeaurora.org>
Acked-by: default avatarMinchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: default avatarBartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Acked-by: default avatarMichal Nazarewicz <mina86@mina86.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent e97ca8e5
...@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
{ {
int nr_scanned = 0, total_isolated = 0; int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL; struct page *cursor, *valid_page = NULL;
unsigned long nr_strict_required = end_pfn - blockpfn;
unsigned long flags; unsigned long flags;
bool locked = false; bool locked = false;
...@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
nr_scanned++; nr_scanned++;
if (!pfn_valid_within(blockpfn)) if (!pfn_valid_within(blockpfn))
continue; goto isolate_fail;
if (!valid_page) if (!valid_page)
valid_page = page; valid_page = page;
if (!PageBuddy(page)) if (!PageBuddy(page))
continue; goto isolate_fail;
/* /*
* The zone lock must be held to isolate freepages. * The zone lock must be held to isolate freepages.
...@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* Recheck this is a buddy page under lock */ /* Recheck this is a buddy page under lock */
if (!PageBuddy(page)) if (!PageBuddy(page))
continue; goto isolate_fail;
/* Found a free page, break it into order-0 pages */ /* Found a free page, break it into order-0 pages */
isolated = split_free_page(page); isolated = split_free_page(page);
if (!isolated && strict)
break;
total_isolated += isolated; total_isolated += isolated;
for (i = 0; i < isolated; i++) { for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist); list_add(&page->lru, freelist);
...@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (isolated) { if (isolated) {
blockpfn += isolated - 1; blockpfn += isolated - 1;
cursor += isolated - 1; cursor += isolated - 1;
continue;
} }
isolate_fail:
if (strict)
break;
else
continue;
} }
trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
...@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* pages requested were isolated. If there were any failures, 0 is * pages requested were isolated. If there were any failures, 0 is
* returned and CMA will fail. * returned and CMA will fail.
*/ */
if (strict && nr_strict_required > total_isolated) if (strict && blockpfn < end_pfn)
total_isolated = 0; total_isolated = 0;
if (locked) if (locked)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment