Commit d6332692 authored by Rick Edgecombe, committed by Ingo Molnar

mm/hibernation: Make hibernation handle unmapped pages

Make hibernate handle unmapped pages on the direct map when
CONFIG_ARCH_HAS_SET_DIRECT_MAP=y. The set_direct_map_*() functions can leave
pages unmapped from the direct map, so the hibernate save operation must now
check whether a page has a valid mapping, and temporarily remap it around the
copy if it does not (as in the sketch below).
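
For reference, the hibernate-side handling lives in safe_copy_page() in
kernel/power/snapshot.c; its pre-existing logic (sketched roughly from this
kernel's sources) checks the mapping and remaps the page around the copy:

	static void safe_copy_page(void *dst, struct page *s_page)
	{
		if (kernel_page_present(s_page)) {
			do_copy_page(dst, page_address(s_page));
		} else {
			/* Page is unmapped: map it, copy it, unmap it again. */
			kernel_map_pages(s_page, 1, 1);
			do_copy_page(dst, page_address(s_page));
			kernel_map_pages(s_page, 1, 0);
		}
	}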

Previously this check was only done when CONFIG_DEBUG_PAGEALLOC=y was
configured. The change does not appear to have a significant impact on
hibernation performance: the save operation measured 819.02 MB/s before and
813.32 MB/s after.

Before:
[    4.670938] PM: Wrote 171996 kbytes in 0.21 seconds (819.02 MB/s)

After:
[    4.504714] PM: Wrote 178932 kbytes in 0.22 seconds (813.32 MB/s)
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Pavel Machek <pavel@ucw.cz>
Cc: <akpm@linux-foundation.org>
Cc: <ard.biesheuvel@linaro.org>
Cc: <deneen.t.dock@intel.com>
Cc: <kernel-hardening@lists.openwall.com>
Cc: <kristen@linux.intel.com>
Cc: <linux_dti@icloud.com>
Cc: <will.deacon@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190426001143.4983-16-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d253ca0c
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2257,7 +2257,6 @@ int set_direct_map_default_noflush(struct page *page)
 	return __set_pages_p(page, 1);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -2302,11 +2301,8 @@ bool kernel_page_present(struct page *page)
 	pte = lookup_address((unsigned long)page_address(page), &level);
 	return (pte_val(*pte) & _PAGE_PRESENT);
 }
-
 #endif /* CONFIG_HIBERNATION */
 
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 				   unsigned numpages, unsigned long page_flags)
 {
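
For context, the set_direct_map_*() helpers that can leave such unmapped
pages were added to this file by the parent commit (d253ca0c); on x86 they
are thin wrappers, roughly:

	int set_direct_map_invalid_noflush(struct page *page)
	{
		/* Clear _PAGE_PRESENT on the page's direct-map alias. */
		return __set_pages_np(page, 1);
	}

	int set_direct_map_default_noflush(struct page *page)
	{
		/* Restore default direct-map permissions (present, RW). */
		return __set_pages_p(page, 1);
	}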
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2610,37 +2610,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
 					int enable) { }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 
 static inline bool debug_pagealloc_enabled(void)
 {
-	return _debug_pagealloc_enabled;
+	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
 }
 
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	if (!debug_pagealloc_enabled())
-		return;
-
 	__kernel_map_pages(page, numpages, enable);
 }
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC */
+#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
-{
-	return false;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
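
The IS_ENABLED() change above is what lets debug_pagealloc_enabled() be
defined unconditionally: when CONFIG_DEBUG_PAGEALLOC is off, the condition
folds to zero at compile time and the reference to _debug_pagealloc_enabled
is discarded as dead code. A minimal stand-alone illustration of the pattern
(a plain 0/1 macro stands in for the kernel's IS_ENABLED(); names here are
hypothetical):

	#include <stdio.h>
	#include <stdbool.h>

	/* Stand-in for IS_ENABLED(CONFIG_DEBUG_PAGEALLOC): 0 simulates a
	 * kernel built without the option, 1 a kernel built with it. */
	#define DEBUG_PAGEALLOC_BUILT_IN 0

	static bool _debug_pagealloc_enabled = true;	/* boot-time toggle */

	static inline bool debug_pagealloc_enabled(void)
	{
		/* With the option compiled out this folds to 'false', so
		 * the runtime flag is never even read. */
		return DEBUG_PAGEALLOC_BUILT_IN && _debug_pagealloc_enabled;
	}

	int main(void)
	{
		printf("debug_pagealloc_enabled() = %d\n",
		       debug_pagealloc_enabled());
		return 0;
	}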
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1342,8 +1342,9 @@ static inline void do_copy_page(long *dst, long *src)
  * safe_copy_page - Copy a page in a safe way.
  *
  * Check if the page we are going to copy is marked as present in the kernel
- * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
- * and in that case kernel_page_present() always returns 'true').
+ * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
+ * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
+ * always returns 'true'.
  */
 static void safe_copy_page(void *dst, struct page *s_page)
 {
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1144,7 +1144,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	arch_free_page(page, order);
 	kernel_poison_pages(page, 1 << order, 0);
-	kernel_map_pages(page, 1 << order, 0);
+	if (debug_pagealloc_enabled())
+		kernel_map_pages(page, 1 << order, 0);
+
 	kasan_free_nondeferred_pages(page, order);
 
 	return true;
@@ -2014,7 +2016,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
+	if (debug_pagealloc_enabled())
+		kernel_map_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
 	kernel_poison_pages(page, 1 << order, 1);
 	set_page_owner(page, order, gfp_flags);
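
Note the asymmetry these two hunks create: the allocator hot paths now
perform the debug_pagealloc_enabled() check themselves, because
kernel_map_pages() no longer does it internally. That appears to be the
point of the mm.h change: hibernate (see the safe_copy_page() sketch above)
must be able to call kernel_map_pages() to restore a page that
set_direct_map_invalid_noflush() removed from the direct map, even on a
kernel where debug page-alloc is built in but disabled at boot.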