Commit c277331d authored by Johannes Weiner, committed by Linus Torvalds

mm: page_alloc: clear PG_mlocked before checking flags on free

da456f14 "page allocator: do not disable interrupts in free_page_mlock()" moved
the PG_mlocked clearing after the flag sanity checking which makes mlocked
pages always trigger 'bad page'.  Fix this by clearing the bit up front.
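
[Editorial sketch, not part of the commit] The ordering problem can be reduced to a small model: if the mlock bit is still set when the free-path flag sanity check runs, the page is reported bad; testing and clearing the bit before the check, while remembering the old value for the statistics update, avoids that. The code below is a minimal userspace C sketch, not kernel code; page_t, PG_MLOCKED, FLAGS_CHECK_AT_FREE, bad_page_check() and the printf-based "accounting" are made-up stand-ins for the real page flags, check mask and counters.

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-ins for the kernel's page flag machinery. */
#define PG_LOCKED		(1u << 0)
#define PG_MLOCKED		(1u << 1)
/* Flags that must already be clear when a page is freed. */
#define FLAGS_CHECK_AT_FREE	(PG_LOCKED | PG_MLOCKED)

typedef struct { unsigned int flags; } page_t;

/* Stand-in for the free-path sanity check that reports 'bad page'. */
static bool bad_page_check(const page_t *page)
{
	return (page->flags & FLAGS_CHECK_AT_FREE) != 0;
}

/* Stand-in for TestClearPageMlocked(): clear the bit, return the old value. */
static bool test_clear_mlocked(page_t *page)
{
	bool was_set = page->flags & PG_MLOCKED;

	page->flags &= ~PG_MLOCKED;
	return was_set;
}

static void free_page_model(page_t *page)
{
	/* The fix: clear PG_MLOCKED up front and remember the old state. */
	bool was_mlocked = test_clear_mlocked(page);

	if (bad_page_check(page)) {
		/* The old ordering reached this branch for every mlocked page. */
		printf("bad page\n");
		return;
	}
	if (was_mlocked)
		printf("update NR_MLOCK / UNEVICTABLE_MLOCKFREED counters\n");
	printf("page freed\n");
}

int main(void)
{
	page_t page = { .flags = PG_MLOCKED };

	free_page_model(&page);	/* frees cleanly instead of reporting 'bad page' */
	return 0;
}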
Reported-and-debugged-by: Peter Chubb <peter.chubb@nicta.com.au>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9063c61f
mm/page_alloc.c
@@ -488,7 +488,6 @@ static inline void __free_one_page(struct page *page,
  */
 static inline void free_page_mlock(struct page *page)
 {
-	__ClearPageMlocked(page);
 	__dec_zone_page_state(page, NR_MLOCK);
 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
@@ -558,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	unsigned long flags;
 	int i;
 	int bad = 0;
-	int clearMlocked = PageMlocked(page);
+	int wasMlocked = TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, order);
@@ -576,7 +575,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
-	if (unlikely(clearMlocked))
+	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order,
@@ -1022,7 +1021,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
-	int clearMlocked = PageMlocked(page);
+	int wasMlocked = TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, 0);
@@ -1041,7 +1040,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	set_page_private(page, get_pageblock_migratetype(page));
 	local_irq_save(flags);
-	if (unlikely(clearMlocked))
+	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_event(PGFREE);
...
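
[Editorial note] One design point that can be read directly from the hunks above: free_page_mlock() no longer clears the bit itself, so it remains a plain statistics update inside the irq-disabled region, while the test-and-clear happens once, outside it, and its return value carries the old state forward to the unlikely()-guarded call. The same test-and-clear idiom can be sketched with a C11 atomic; this is only an analogy to TestClearPageMlocked(), not the kernel implementation, and the bit value below is arbitrary.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PG_MLOCKED	(1u << 1)	/* illustrative bit, not the kernel's value */

/* Analogy to TestClearPageMlocked(): atomically clear the bit and report
 * whether it was set, so the caller can act on the old state later without
 * touching the flag word again. */
static bool test_clear_mlocked(_Atomic unsigned int *flags)
{
	unsigned int old = atomic_fetch_and(flags, ~PG_MLOCKED);

	return (old & PG_MLOCKED) != 0;
}

int main(void)
{
	_Atomic unsigned int flags = PG_MLOCKED;
	bool was_mlocked = test_clear_mlocked(&flags);	/* done before any checks */

	/* ... the flag sanity checks would run here and see the bit already clear ... */

	if (was_mlocked)
		printf("update mlock statistics\n");	/* deferred use of the old state */
	return 0;
}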