Commit 4e611801 authored by Vlastimil Babka's avatar Vlastimil Babka Committed by Linus Torvalds

mm, page_alloc: uninline the bad page part of check_new_page()

Bad pages should be rare, so the code handling them doesn't need to be
inline for performance reasons.  Put it into a separate function which
returns void.  This also assumes that the initial page_expected_state()
result will match the result of the thorough check, i.e.  the page
doesn't become "good" in the meantime.  This matches the same
expectations already in place in free_pages_check().

!DEBUG_VM bloat-o-meter:

  add/remove: 1/0 grow/shrink: 0/1 up/down: 134/-274 (-140)
  function                                     old     new   delta
  check_new_page_bad                             -     134    +134
  get_page_from_freelist                      3468    3194    -274
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e2769dbd
...@@ -1647,19 +1647,11 @@ static inline void expand(struct zone *zone, struct page *page, ...@@ -1647,19 +1647,11 @@ static inline void expand(struct zone *zone, struct page *page,
} }
} }
/* static void check_new_page_bad(struct page *page)
* This page is about to be returned from the page allocator
*/
static inline int check_new_page(struct page *page)
{ {
const char *bad_reason; const char *bad_reason = NULL;
unsigned long bad_flags; unsigned long bad_flags = 0;
if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))
return 0;
bad_reason = NULL;
bad_flags = 0;
if (unlikely(atomic_read(&page->_mapcount) != -1)) if (unlikely(atomic_read(&page->_mapcount) != -1))
bad_reason = "nonzero mapcount"; bad_reason = "nonzero mapcount";
if (unlikely(page->mapping != NULL)) if (unlikely(page->mapping != NULL))
...@@ -1678,11 +1670,20 @@ static inline int check_new_page(struct page *page) ...@@ -1678,11 +1670,20 @@ static inline int check_new_page(struct page *page)
if (unlikely(page->mem_cgroup)) if (unlikely(page->mem_cgroup))
bad_reason = "page still charged to cgroup"; bad_reason = "page still charged to cgroup";
#endif #endif
if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags);
bad_page(page, bad_reason, bad_flags); }
return 1;
} /*
return 0; * This page is about to be returned from the page allocator
*/
static inline int check_new_page(struct page *page)
{
if (likely(page_expected_state(page,
PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
return 0;
check_new_page_bad(page);
return 1;
} }
static inline bool free_pages_prezeroed(bool poisoned) static inline bool free_pages_prezeroed(bool poisoned)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment