Commit 031bc574 authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Linus Torvalds

mm/debug-pagealloc: make debug-pagealloc boottime configurable

We are now prepared to avoid enabling debug-pagealloc at boot time.  So
introduce a new kernel parameter that controls debug-pagealloc at boot time
(it is disabled by default), and make the related functions no-ops when it
is disabled.

The only non-intuitive part is the change to the guard page functions.
Because guard pages are effective only if debug-pagealloc is enabled,
turning them off when debug-pagealloc is disabled is the reasonable thing
to do.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e30825f1
...@@ -829,6 +829,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. ...@@ -829,6 +829,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
CONFIG_DEBUG_PAGEALLOC, hence this option will not help CONFIG_DEBUG_PAGEALLOC, hence this option will not help
tracking down these problems. tracking down these problems.
debug_pagealloc=
[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
parameter enables the feature at boot time. By
default, it is disabled. Not enabling it at boot
time avoids allocating a huge chunk of memory for
debug pagealloc, and the system will behave mostly
the same as a kernel built without
CONFIG_DEBUG_PAGEALLOC.
on: enable the feature
debugpat [X86] Enable PAT debugging debugpat [X86] Enable PAT debugging
decnet.addr= [HW,NET] decnet.addr= [HW,NET]
......
...@@ -1514,7 +1514,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) ...@@ -1514,7 +1514,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0); mmu_kernel_ssize, 0);
} }
void kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
unsigned long flags, vaddr, lmi; unsigned long flags, vaddr, lmi;
int i; int i;
......
...@@ -429,7 +429,7 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot) ...@@ -429,7 +429,7 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
} }
void kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
if (PageHighMem(page)) if (PageHighMem(page))
return; return;
......
...@@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr) ...@@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
} }
} }
void kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
unsigned long address; unsigned long address;
int nr, i, j; int nr, i, j;
......
...@@ -1621,7 +1621,7 @@ static void __init kernel_physical_mapping_init(void) ...@@ -1621,7 +1621,7 @@ static void __init kernel_physical_mapping_init(void)
} }
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
......
...@@ -1817,7 +1817,7 @@ static int __set_pages_np(struct page *page, int numpages) ...@@ -1817,7 +1817,7 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0); return __change_page_attr_set_clr(&cpa, 0);
} }
void kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
if (PageHighMem(page)) if (PageHighMem(page))
return; return;
......
...@@ -2061,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm, ...@@ -2061,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable); extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
/* Returns true when debug_pagealloc was enabled on the kernel command line. */
static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}
/*
 * Forward to __kernel_map_pages() only when debug_pagealloc was enabled
 * at boot; otherwise this is a no-op so the feature costs nothing.
 */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (debug_pagealloc_enabled())
		__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page); extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */ #endif /* CONFIG_HIBERNATION */
......
...@@ -10,11 +10,17 @@ static bool page_poisoning_enabled __read_mostly; ...@@ -10,11 +10,17 @@ static bool page_poisoning_enabled __read_mostly;
static bool need_page_poisoning(void)
{
	/* Page poisoning is only wanted when debug_pagealloc is enabled. */
	return debug_pagealloc_enabled();
}
static void init_page_poisoning(void) static void init_page_poisoning(void)
{ {
if (!debug_pagealloc_enabled())
return;
page_poisoning_enabled = true; page_poisoning_enabled = true;
} }
...@@ -119,7 +125,7 @@ static void unpoison_pages(struct page *page, int n) ...@@ -119,7 +125,7 @@ static void unpoison_pages(struct page *page, int n)
unpoison_page(page + i); unpoison_page(page + i);
} }
void kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
if (!page_poisoning_enabled) if (!page_poisoning_enabled)
return; return;
......
...@@ -425,15 +425,35 @@ static inline void prep_zero_page(struct page *page, unsigned int order, ...@@ -425,15 +425,35 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder; unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly; bool _debug_guardpage_enabled __read_mostly;
/*
 * Parse the "debug_pagealloc=" kernel parameter.  Only the value "on"
 * turns the feature on; anything else leaves the flag untouched, so the
 * feature stays off by default.
 */
static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		_debug_pagealloc_enabled = true;

	return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);
static bool need_debug_guardpage(void)
{
	/* Guard pages are effective only when debug_pagealloc is enabled. */
	return debug_pagealloc_enabled();
}
static void init_debug_guardpage(void) static void init_debug_guardpage(void)
{ {
if (!debug_pagealloc_enabled())
return;
_debug_guardpage_enabled = true; _debug_guardpage_enabled = true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment