Commit 3972f6bb authored by Vlastimil Babka, committed by Linus Torvalds

mm, debug_pagealloc: use a page type instead of page_ext flag

When debug_pagealloc is enabled, we currently allocate the page_ext
array to mark guard pages with the PAGE_EXT_DEBUG_GUARD flag.  Now that
we have the page_type field in struct page, we can use that instead, as
guard pages are neither PageSlab nor mapped to userspace.  This reduces
memory overhead when debug_pagealloc is enabled and there are no other
features requiring the page_ext array.
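
As background: page_type shares its word with _mapcount, which is -1
(all bits set) for a page with no mappings, so a type is recorded by
clearing a bit under the PAGE_TYPE_BASE pattern, and
PAGE_TYPE_OPS(Guard, guard) generates PageGuard() /
__SetPageGuard() / __ClearPageGuard() accordingly.  A minimal
user-space sketch of those semantics (the struct below is a
hypothetical stand-in, not the kernel's struct page):

  #include <stdbool.h>
  #include <stdint.h>

  #define PAGE_TYPE_BASE  0xf0000000u
  #define PG_guard        0x00000800u

  struct page { uint32_t page_type; };  /* stand-in for struct page */

  /* The type is set while the base pattern is intact and the bit is clear. */
  static bool PageGuard(const struct page *page)
  {
          return (page->page_type & (PAGE_TYPE_BASE | PG_guard))
                          == PAGE_TYPE_BASE;
  }

  static void __SetPageGuard(struct page *page)
  {
          page->page_type &= ~PG_guard;   /* clear the bit to set the type */
  }

  static void __ClearPageGuard(struct page *page)
  {
          page->page_type |= PG_guard;    /* restore the bit to clear it */
  }

  int main(void)
  {
          struct page page = { .page_type = 0xffffffffu }; /* _mapcount == -1 */

          __SetPageGuard(&page);
          /* PageGuard(&page) is now true; __ClearPageGuard() reverses it. */
          return PageGuard(&page) ? 0 : 1;
  }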

Link: http://lkml.kernel.org/r/20190603143451.27353-4-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4462b32c
...
@@ -805,12 +805,10 @@
 			tracking down these problems.
 
 	debug_pagealloc=
-			[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
-			parameter enables the feature at boot time. In
-			default, it is disabled. We can avoid allocating huge
-			chunk of memory for debug pagealloc if we don't enable
-			it at boot time and the system will work mostly same
-			with the kernel built without CONFIG_DEBUG_PAGEALLOC.
+			[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this parameter
+			enables the feature at boot time. By default, it is
+			disabled and the system will work mostly the same as a
+			kernel built without CONFIG_DEBUG_PAGEALLOC.
 			on: enable the feature
 
 	debugpat	[X86] Enable PAT debugging
...
...
@@ -2862,8 +2862,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
 				bool allow_pagefault);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
-extern struct page_ext_operations debug_guardpage_ops;
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern unsigned int _debug_guardpage_minorder;
 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
...
@@ -2880,16 +2878,10 @@ static inline bool debug_guardpage_enabled(void)
 static inline bool page_is_guard(struct page *page)
 {
-	struct page_ext *page_ext;
-
 	if (!debug_guardpage_enabled())
 		return false;
 
-	page_ext = lookup_page_ext(page);
-	if (unlikely(!page_ext))
-		return false;
-
-	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+	return PageGuard(page);
 }
 
 #else
 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
...
...
@@ -703,6 +703,7 @@ PAGEFLAG_FALSE(DoubleMap)
 #define PG_offline	0x00000100
 #define PG_kmemcg	0x00000200
 #define PG_table	0x00000400
+#define PG_guard	0x00000800
 
 #define PageType(page, flag)						\
 	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
...
@@ -754,6 +755,11 @@ PAGE_TYPE_OPS(Kmemcg, kmemcg)
  */
 PAGE_TYPE_OPS(Table, table)
 
+/*
+ * Marks guardpages used with debug_pagealloc.
+ */
+PAGE_TYPE_OPS(Guard, guard)
+
 extern bool is_free_buddy_page(struct page *page);
 
 __PAGEFLAG(Isolated, isolated, PF_ANY);
...
...
@@ -17,7 +17,6 @@ struct page_ext_operations {
 #ifdef CONFIG_PAGE_EXTENSION
 
 enum page_ext_flags {
-	PAGE_EXT_DEBUG_GUARD,
 	PAGE_EXT_OWNER,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	PAGE_EXT_YOUNG,
...
...
@@ -12,7 +12,6 @@ config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL
 	depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
-	select PAGE_EXTENSION
 	select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
...
...
@@ -50,7 +50,6 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
-#include <linux/page_ext.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
 #include <linux/compaction.h>
...
@@ -668,18 +667,6 @@ static int __init early_debug_pagealloc(char *buf)
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static bool need_debug_guardpage(void)
-{
-	/* If we don't use debug_pagealloc, we don't need guard page */
-	if (!debug_pagealloc_enabled())
-		return false;
-
-	if (!debug_guardpage_minorder())
-		return false;
-
-	return true;
-}
-
 static void init_debug_guardpage(void)
 {
 	if (!debug_pagealloc_enabled())
...
@@ -691,11 +678,6 @@ static void init_debug_guardpage(void)
 	static_branch_enable(&_debug_guardpage_enabled);
 }
 
-struct page_ext_operations debug_guardpage_ops = {
-	.need = need_debug_guardpage,
-	.init = init_debug_guardpage,
-};
-
 static int __init debug_guardpage_minorder_setup(char *buf)
 {
 	unsigned long res;
...
@@ -713,20 +695,13 @@ early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
 static inline bool set_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
-	struct page_ext *page_ext;
-
 	if (!debug_guardpage_enabled())
 		return false;
 
 	if (order >= debug_guardpage_minorder())
 		return false;
 
-	page_ext = lookup_page_ext(page);
-	if (unlikely(!page_ext))
-		return false;
-
-	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
-
+	__SetPageGuard(page);
 	INIT_LIST_HEAD(&page->lru);
 	set_page_private(page, order);
 	/* Guard pages are not available for any usage */
...
@@ -738,23 +713,16 @@ static inline bool set_page_guard(struct zone *zone, struct page *page,
 static inline void clear_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
-	struct page_ext *page_ext;
-
 	if (!debug_guardpage_enabled())
 		return;
 
-	page_ext = lookup_page_ext(page);
-	if (unlikely(!page_ext))
-		return;
-
-	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+	__ClearPageGuard(page);
 
 	set_page_private(page, 0);
 	if (!is_migrate_isolate(migratetype))
 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
-struct page_ext_operations debug_guardpage_ops;
 static inline bool set_page_guard(struct zone *zone, struct page *page,
 			unsigned int order, int migratetype) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
...
@@ -1930,6 +1898,10 @@ void __init page_alloc_init_late(void)
 
 	for_each_populated_zone(zone)
 		set_zone_contiguous(zone);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	init_debug_guardpage();
+#endif
 }
 
 #ifdef CONFIG_CMA
...
...
@@ -59,9 +59,6 @@
  */
 static struct page_ext_operations *page_ext_ops[] = {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	&debug_guardpage_ops,
-#endif
 #ifdef CONFIG_PAGE_OWNER
 	&page_owner_ops,
 #endif
...