Commit 7f234a4d authored by Rafael J. Wysocki

Merge branches 'pm-sleep' and 'pm-tools'

* pm-sleep:
  PM / hibernate: Introduce test_resume mode for hibernation
  x86 / hibernate: Use hlt_play_dead() when resuming from hibernation
  PM / hibernate: Image data protection during restoration
  PM / hibernate: Add missing braces in __register_nosave_region()
  PM / hibernate: Clean up comments in snapshot.c
  PM / hibernate: Clean up function headers in snapshot.c
  PM / hibernate: Add missing braces in hibernate_setup()
  PM / hibernate: Recycle safe pages after image restoration
  PM / hibernate: Simplify mark_unsafe_pages()
  PM / hibernate: Do not free preallocated safe pages during image restore
  PM / suspend: show workqueue state in suspend flow
  PM / sleep: make PM notifiers called symmetrically
  PM / sleep: Make pm_prepare_console() return void
  PM / Hibernate: Don't let kasan instrument snapshot.c

* pm-tools:
  PM / tools: scripts: AnalyzeSuspend v4.2
  tools/turbostat: allow user to alter DESTDIR and PREFIX
@@ -3594,6 +3594,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			present during boot.
 	nocompress	Don't compress/decompress hibernation images.
 	no		Disable hibernation and resume.
+	protect_image	Turn on image protection during restoration
+			(that will set all pages holding image data
+			during restoration read-only).
 	retain_initrd	[RAM] Keep initrd memory after extraction
...
@@ -135,6 +135,7 @@ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
 int common_cpu_die(unsigned int cpu);
 void native_cpu_die(unsigned int cpu);
+void hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
...
@@ -1622,7 +1622,7 @@ static inline void mwait_play_dead(void)
 	}
 }
 
-static inline void hlt_play_dead(void)
+void hlt_play_dead(void)
 {
 	if (__this_cpu_read(cpu_info.x86) >= 4)
 		wbinvd();
...
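For reference, a sketch of the whole function as it reads after this change; the loop below is paraphrased from the surrounding smpboot.c code and is not part of the hunk above. A CPU parked in HLT arms no monitored address, which is the property the resume path relies on:

	void hlt_play_dead(void)
	{
		if (__this_cpu_read(cpu_info.x86) >= 4)
			wbinvd();		/* flush caches before parking */

		while (1)
			native_halt();		/* HLT loop: no MONITOR/MWAIT */
	}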
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/tboot.h>
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -266,6 +267,35 @@ void notrace restore_processor_state(void)
 EXPORT_SYMBOL(restore_processor_state);
 #endif
 
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
+static void resume_play_dead(void)
+{
+	play_dead_common();
+	tboot_shutdown(TB_SHUTDOWN_WFS);
+	hlt_play_dead();
+}
+
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+	void (*play_dead)(void) = smp_ops.play_dead;
+	int ret;
+
+	/*
+	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
+	 * during hibernate image restoration, because it is likely that the
+	 * monitored address will be actually written to at that time and then
+	 * the "dead" CPU will attempt to execute instructions again, but the
+	 * address in its instruction pointer may not be possible to resolve
+	 * any more at that point (the page tables used by it previously may
+	 * have been overwritten by hibernate image data).
+	 */
+	smp_ops.play_dead = resume_play_dead;
+	ret = disable_nonboot_cpus();
+	smp_ops.play_dead = play_dead;
+	return ret;
+}
+#endif
+
 /*
  * When bsp_check() is called in hibernate and suspend, cpu hotplug
  * is disabled already. So it's unnessary to handle race condition between
...
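hibernate_resume_nonboot_cpu_disable() above is a strong definition; it overrides the __weak generic fallback added to kernel/power/hibernate.c further down in this merge. A minimal, self-contained illustration of that override pattern (names are hypothetical, and the two definitions live in different object files):

	/* lib.c: weak default used when nothing overrides it */
	int __weak arch_cpu_disable_hook(void)	/* hypothetical name */
	{
		return 0;
	}

	/* arch.c: a strong definition in another file wins at link time */
	int arch_cpu_disable_hook(void)
	{
		return 1;
	}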
@@ -18,12 +18,11 @@ static inline void pm_set_vt_switch(int do_switch)
 #endif
 
 #ifdef CONFIG_VT_CONSOLE_SLEEP
-extern int pm_prepare_console(void);
+extern void pm_prepare_console(void);
 extern void pm_restore_console(void);
 #else
-static inline int pm_prepare_console(void)
+static inline void pm_prepare_console(void)
 {
-	return 0;
 }
 
 static inline void pm_restore_console(void)
...
 ccflags-$(CONFIG_PM_DEBUG)	:= -DDEBUG
 
+KASAN_SANITIZE_snapshot.o := n
+
 obj-y				+= qos.o
 obj-$(CONFIG_PM)		+= main.o
 obj-$(CONFIG_VT_CONSOLE_SLEEP)	+= console.o
...
@@ -126,17 +126,17 @@ static bool pm_vt_switch(void)
 	return ret;
 }
 
-int pm_prepare_console(void)
+void pm_prepare_console(void)
 {
 	if (!pm_vt_switch())
-		return 0;
+		return;
 
 	orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
 	if (orig_fgconsole < 0)
-		return 1;
+		return;
 
 	orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
-	return 0;
+	return;
 }
 
 void pm_restore_console(void)
...
@@ -52,6 +52,7 @@ enum {
 #ifdef CONFIG_SUSPEND
 	HIBERNATION_SUSPEND,
 #endif
+	HIBERNATION_TEST_RESUME,
 	/* keep last */
 	__HIBERNATION_AFTER_LAST
 };
@@ -409,6 +410,11 @@ int hibernation_snapshot(int platform_mode)
 	goto Close;
 }
 
+int __weak hibernate_resume_nonboot_cpu_disable(void)
+{
+	return disable_nonboot_cpus();
+}
+
 /**
  * resume_target_kernel - Restore system state from a hibernation image.
  * @platform_mode: Whether or not to use the platform driver.
@@ -433,7 +439,7 @@ static int resume_target_kernel(bool platform_mode)
 	if (error)
 		goto Cleanup;
 
-	error = disable_nonboot_cpus();
+	error = hibernate_resume_nonboot_cpu_disable();
 	if (error)
 		goto Enable_cpus;
@@ -642,12 +648,39 @@ static void power_down(void)
 		cpu_relax();
 }
 
+static int load_image_and_restore(void)
+{
+	int error;
+	unsigned int flags;
+
+	pr_debug("PM: Loading hibernation image.\n");
+
+	lock_device_hotplug();
+	error = create_basic_memory_bitmaps();
+	if (error)
+		goto Unlock;
+
+	error = swsusp_read(&flags);
+	swsusp_close(FMODE_READ);
+	if (!error)
+		hibernation_restore(flags & SF_PLATFORM_MODE);
+
+	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
+	swsusp_free();
+	free_basic_memory_bitmaps();
+ Unlock:
+	unlock_device_hotplug();
+
+	return error;
+}
+
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
 int hibernate(void)
 {
-	int error;
+	int error, nr_calls = 0;
+	bool snapshot_test = false;
 
 	if (!hibernation_available()) {
 		pr_debug("PM: Hibernation not available.\n");
@@ -662,9 +695,11 @@ int hibernate(void)
 	}
 
 	pm_prepare_console();
-	error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Exit;
+	}
 
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
@@ -697,8 +732,12 @@ int hibernate(void)
 		pr_debug("PM: writing image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
-		if (!error)
-			power_down();
+		if (!error) {
+			if (hibernation_mode == HIBERNATION_TEST_RESUME)
+				snapshot_test = true;
+			else
+				power_down();
+		}
 		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
@@ -709,12 +748,18 @@ int hibernate(void)
 	free_basic_memory_bitmaps();
  Thaw:
 	unlock_device_hotplug();
+	if (snapshot_test) {
+		pr_debug("PM: Checking hibernation image\n");
+		error = swsusp_check();
+		if (!error)
+			error = load_image_and_restore();
+	}
 	thaw_processes();
 
 	/* Don't bother checking whether freezer_test_done is true */
 	freezer_test_done = false;
  Exit:
-	pm_notifier_call_chain(PM_POST_HIBERNATION);
+	__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
  Unlock:
@@ -740,8 +785,7 @@ int hibernate(void)
  */
 static int software_resume(void)
 {
-	int error;
-	unsigned int flags;
+	int error, nr_calls = 0;
 
 	/*
 	 * If the user said "noresume".. bail out early.
@@ -827,35 +871,20 @@ static int software_resume(void)
 	}
 
 	pm_prepare_console();
-	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Close_Finish;
+	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
 	if (error)
 		goto Close_Finish;
-
-	pr_debug("PM: Loading hibernation image.\n");
-
-	lock_device_hotplug();
-	error = create_basic_memory_bitmaps();
-	if (error)
-		goto Thaw;
-
-	error = swsusp_read(&flags);
-	swsusp_close(FMODE_READ);
-	if (!error)
-		hibernation_restore(flags & SF_PLATFORM_MODE);
-
-	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
-	swsusp_free();
-	free_basic_memory_bitmaps();
- Thaw:
-	unlock_device_hotplug();
+	error = load_image_and_restore();
 	thaw_processes();
  Finish:
-	pm_notifier_call_chain(PM_POST_RESTORE);
+	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
 	/* For success case, the suspend path will release the lock */
@@ -878,6 +907,7 @@ static const char * const hibernation_modes[] = {
 #ifdef CONFIG_SUSPEND
 	[HIBERNATION_SUSPEND]	= "suspend",
 #endif
+	[HIBERNATION_TEST_RESUME]	= "test_resume",
 };
 /*
@@ -924,6 +954,7 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 #ifdef CONFIG_SUSPEND
 		case HIBERNATION_SUSPEND:
 #endif
+		case HIBERNATION_TEST_RESUME:
 			break;
 		case HIBERNATION_PLATFORM:
 			if (hibernation_ops)
@@ -970,6 +1001,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 #ifdef CONFIG_SUSPEND
 		case HIBERNATION_SUSPEND:
 #endif
+		case HIBERNATION_TEST_RESUME:
 			hibernation_mode = mode;
 			break;
 		case HIBERNATION_PLATFORM:
@@ -1115,13 +1147,16 @@ static int __init resume_offset_setup(char *str)
 static int __init hibernate_setup(char *str)
 {
-	if (!strncmp(str, "noresume", 8))
+	if (!strncmp(str, "noresume", 8)) {
 		noresume = 1;
-	else if (!strncmp(str, "nocompress", 10))
+	} else if (!strncmp(str, "nocompress", 10)) {
 		nocompress = 1;
-	else if (!strncmp(str, "no", 2)) {
+	} else if (!strncmp(str, "no", 2)) {
 		noresume = 1;
 		nohibernate = 1;
+	} else if (IS_ENABLED(CONFIG_DEBUG_RODATA)
+		   && !strncmp(str, "protect_image", 13)) {
+		enable_restore_image_protection();
 	}
 	return 1;
 }
...
@@ -38,12 +38,19 @@ int unregister_pm_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_pm_notifier);
 
-int pm_notifier_call_chain(unsigned long val)
+int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
 {
-	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+	int ret;
+
+	ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
+					     nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
 }
+
+int pm_notifier_call_chain(unsigned long val)
+{
+	return __pm_notifier_call_chain(val, -1, NULL);
+}
 
 /* If set, devices may be suspended and resumed asynchronously. */
 int pm_async_enabled = 1;
...
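The nr_to_call/nr_calls plumbing above is what makes the PM notifier calls symmetric: if PM_HIBERNATION_PREPARE fails at notifier N, only the notifiers that already ran get the matching PM_POST_* event. A hedged sketch of a driver-side callback that benefits from this (the hardware helpers are illustrative, not a real API):

	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static void my_quiesce_hw(void) { /* illustrative: stop the device */ }
	static void my_restart_hw(void) { /* illustrative: restart it */ }

	static int my_pm_notify(struct notifier_block *nb, unsigned long action,
				void *data)
	{
		switch (action) {
		case PM_HIBERNATION_PREPARE:
			my_quiesce_hw();
			break;
		case PM_POST_HIBERNATION:
			/*
			 * With symmetric notifier calls, this branch runs only
			 * if the PREPARE branch above was actually invoked.
			 */
			my_restart_hw();
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_pm_nb = {
		.notifier_call = my_pm_notify,
	};
	/* registered once at driver init: register_pm_notifier(&my_pm_nb); */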
@@ -38,6 +38,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
 }
 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
 
+extern int hibernate_resume_nonboot_cpu_disable(void);
+
 /*
  * Keep some memory free so that I/O operations can succeed without paging
  * [Might this be more than 4 MB?]
@@ -59,6 +61,13 @@ extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);
 
+#ifdef CONFIG_DEBUG_RODATA
+/* kernel/power/snapshot.c */
+extern void enable_restore_image_protection(void);
+#else
+static inline void enable_restore_image_protection(void) {}
+#endif /* CONFIG_DEBUG_RODATA */
+
 #else /* !CONFIG_HIBERNATION */
 
 static inline void hibernate_reserved_size_init(void) {}
@@ -200,6 +209,8 @@ static inline void suspend_test_finish(const char *label) {}
 
 #ifdef CONFIG_PM_SLEEP
 /* kernel/power/main.c */
+extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
+				    int *nr_calls);
 extern int pm_notifier_call_chain(unsigned long val);
 #endif
...
@@ -89,6 +89,9 @@ static int try_to_freeze_tasks(bool user_only)
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
+		if (wq_busy)
+			show_workqueue_state();
+
 		if (!wakeup) {
 			read_lock(&tasklist_lock);
 			for_each_process_thread(g, p) {
...
@@ -38,6 +38,43 @@
 
 #include "power.h"
 
+#ifdef CONFIG_DEBUG_RODATA
+static bool hibernate_restore_protection;
+static bool hibernate_restore_protection_active;
+
+void enable_restore_image_protection(void)
+{
+	hibernate_restore_protection = true;
+}
+
+static inline void hibernate_restore_protection_begin(void)
+{
+	hibernate_restore_protection_active = hibernate_restore_protection;
+}
+
+static inline void hibernate_restore_protection_end(void)
+{
+	hibernate_restore_protection_active = false;
+}
+
+static inline void hibernate_restore_protect_page(void *page_address)
+{
+	if (hibernate_restore_protection_active)
+		set_memory_ro((unsigned long)page_address, 1);
+}
+
+static inline void hibernate_restore_unprotect_page(void *page_address)
+{
+	if (hibernate_restore_protection_active)
+		set_memory_rw((unsigned long)page_address, 1);
+}
+#else
+static inline void hibernate_restore_protection_begin(void) {}
+static inline void hibernate_restore_protection_end(void) {}
+static inline void hibernate_restore_protect_page(void *page_address) {}
+static inline void hibernate_restore_unprotect_page(void *page_address) {}
+#endif /* CONFIG_DEBUG_RODATA */
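A hedged sketch of how the protect/unprotect helpers pair up on the restore path; the wrapper below is hypothetical (in the real patch the calls are made from the page-restoration code, and swsusp_free() unprotects each page before freeing it, as later hunks show):

	/* Hypothetical restore step: fill a page frame, then lock it down. */
	static void restore_data_page(void *dst, const void *src)
	{
		memcpy(dst, src, PAGE_SIZE);
		hibernate_restore_protect_page(dst);	/* read-only from here on */
	}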
 
 static int swsusp_page_is_free(struct page *);
 static void swsusp_set_page_forbidden(struct page *);
 static void swsusp_unset_page_forbidden(struct page *);
 
@@ -67,25 +104,32 @@ void __init hibernate_image_size_init(void)
 	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
 }
 
-/* List of PBEs needed for restoring the pages that were allocated before
+/*
+ * List of PBEs needed for restoring the pages that were allocated before
  * the suspend and included in the suspend image, but have also been
  * allocated by the "resume" kernel, so their contents cannot be written
  * directly to their "original" page frames.
  */
 struct pbe *restore_pblist;
 
-/* Pointer to an auxiliary buffer (1 page) */
-static void *buffer;
-
-/**
- * @safe_needed - on resume, for storing the PBE list and the image,
- * we can only use memory pages that do not conflict with the pages
- * used before suspend. The unsafe pages have PageNosaveFree set
- * and we count them using unsafe_pages.
- *
- * Each allocated image page is marked as PageNosave and PageNosaveFree
- * so that swsusp_free() can release it.
- */
+/* struct linked_page is used to build chains of pages */
+
+#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
+
+struct linked_page {
+	struct linked_page *next;
+	char data[LINKED_PAGE_DATA_SIZE];
+} __packed;
+
+/*
+ * List of "safe" pages (ie. pages that were not used by the image kernel
+ * before hibernation) that may be used as temporary storage for image kernel
+ * memory contents.
+ */
+static struct linked_page *safe_pages_list;
+
+/* Pointer to an auxiliary buffer (1 page) */
+static void *buffer;
 
 #define PG_ANY		0
 #define PG_SAFE		1
@@ -94,6 +138,19 @@ static void *buffer;
 
 static unsigned int allocated_unsafe_pages;
 
+/**
+ * get_image_page - Allocate a page for a hibernation image.
+ * @gfp_mask: GFP mask for the allocation.
+ * @safe_needed: Get pages that were not used before hibernation (restore only)
+ *
+ * During image restoration, for storing the PBE list and the image data, we can
+ * only use memory pages that do not conflict with the pages used before
+ * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
+ * using allocated_unsafe_pages.
+ *
+ * Each allocated image page is marked as PageNosave and PageNosaveFree so that
+ * swsusp_free() can release it.
+ */
 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 {
 	void *res;
@@ -113,9 +170,21 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 	return res;
 }
+static void *__get_safe_page(gfp_t gfp_mask)
+{
+	if (safe_pages_list) {
+		void *ret = safe_pages_list;
+
+		safe_pages_list = safe_pages_list->next;
+		memset(ret, 0, PAGE_SIZE);
+		return ret;
+	}
+	return get_image_page(gfp_mask, PG_SAFE);
+}
+
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+	return (unsigned long)__get_safe_page(gfp_mask);
 }
 
 static struct page *alloc_image_page(gfp_t gfp_mask)
@@ -130,11 +199,22 @@ static struct page *alloc_image_page(gfp_t gfp_mask)
 	return page;
 }
+static void recycle_safe_page(void *page_address)
+{
+	struct linked_page *lp = page_address;
+
+	lp->next = safe_pages_list;
+	safe_pages_list = lp;
+}
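Together with __get_safe_page() above, this forms a simple LIFO free list. A brief illustration, assuming the helpers above:

	void *p = (void *)get_safe_page(GFP_KERNEL);	/* pops, or allocates */

	recycle_safe_page(p);	/* pushes p back onto safe_pages_list */
	/* The next __get_safe_page() call returns p again, zeroed. */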
 /**
- * free_image_page - free page represented by @addr, allocated with
- * get_image_page (page flags set by it must be cleared)
+ * free_image_page - Free a page allocated for hibernation image.
+ * @addr: Address of the page to free.
+ * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
+ *
+ * The page to free should have been allocated by get_image_page() (page flags
+ * set by it are affected).
  */
 static inline void free_image_page(void *addr, int clear_nosave_free)
 {
 	struct page *page;
@@ -150,17 +230,8 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
 	__free_page(page);
 }
 
-/* struct linked_page is used to build chains of pages */
-
-#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
-
-struct linked_page {
-	struct linked_page *next;
-	char data[LINKED_PAGE_DATA_SIZE];
-} __packed;
-
-static inline void
-free_list_of_pages(struct linked_page *list, int clear_page_nosave)
+static inline void free_list_of_pages(struct linked_page *list,
+				      int clear_page_nosave)
 {
 	while (list) {
 		struct linked_page *lp = list->next;
@@ -170,7 +241,7 @@ free_list_of_pages(struct linked_page *list, int clear_page_nosave)
 	}
 }
-/**
+/*
  * struct chain_allocator is used for allocating small objects out of
  * a linked list of pages called 'the chain'.
  *
@@ -182,18 +253,16 @@ free_list_of_pages(struct linked_page *list, int clear_page_nosave)
  * NOTE: The chain allocator may be inefficient if the allocated objects
  * are not much smaller than PAGE_SIZE.
  */
 struct chain_allocator {
 	struct linked_page *chain;	/* the chain */
 	unsigned int used_space;	/* total size of objects allocated out
-					 * of the current page
-					 */
+					   of the current page */
 	gfp_t gfp_mask;		/* mask for allocating pages */
 	int safe_needed;	/* if set, only "safe" pages are allocated */
 };
 
-static void
-chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
+static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
+		       int safe_needed)
 {
 	ca->chain = NULL;
 	ca->used_space = LINKED_PAGE_DATA_SIZE;
@@ -208,7 +277,8 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 		struct linked_page *lp;
 
-		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
+		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
+					get_image_page(ca->gfp_mask, PG_ANY);
 		if (!lp)
 			return NULL;
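A hedged usage sketch of the chain allocator (the function is illustrative): initialize once, then carve small objects out of successive linked pages; the safe_needed flag now selects __get_safe_page(), per the change above:

	static struct pbe *alloc_pbes_sketch(void)	/* illustrative */
	{
		struct chain_allocator ca;
		struct pbe *a, *b;

		chain_init(&ca, GFP_KERNEL, PG_ANY);
		a = chain_alloc(&ca, sizeof(*a));	/* grabs the first page */
		b = chain_alloc(&ca, sizeof(*b));	/* same page, bumps used_space */
		if (!a || !b)
			return NULL;
		return a;
	}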
@@ -243,7 +313,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  * struct zone_bitmap contains a pointer to a list of bitmap block
  * objects and a pointer to the bitmap block object that has been
  * most recently used for setting bits. Additionally, it contains the
- * pfns that correspond to the start and end of the represented zone.
+ * PFNs that correspond to the start and end of the represented zone.
  *
  * struct bm_block contains a pointer to the memory page in which
  * information is stored (in the form of a block of bitmap)
@@ -305,9 +375,8 @@ struct bm_position {
 struct memory_bitmap {
 	struct list_head zones;
 	struct linked_page *p_list;	/* list of pages used to store zone
-					 * bitmap objects and bitmap block
-					 * objects
-					 */
+					   bitmap objects and bitmap block
+					   objects */
 	struct bm_position cur;	/* most recently used bit position */
 };
 
@@ -321,7 +390,7 @@ struct memory_bitmap {
 #endif
 #define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
-/*
+/**
  * alloc_rtree_node - Allocate a new node and add it to the radix tree.
  *
  * This function is used to allocate inner nodes as well as the
@@ -347,8 +416,8 @@ static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
 	return node;
 }
 
-/*
- * add_rtree_block - Add a new leave node to the radix tree
+/**
+ * add_rtree_block - Add a new leave node to the radix tree.
  *
  * The leave nodes need to be allocated in order to keep the leaves
  * linked list in order. This is guaranteed by the zone->blocks
@@ -417,17 +486,18 @@ static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 			       int clear_nosave_free);
 
-/*
- * create_zone_bm_rtree - create a radix tree for one zone
+/**
+ * create_zone_bm_rtree - Create a radix tree for one zone.
  *
  * Allocated the mem_zone_bm_rtree structure and initializes it.
  * This function also allocated and builds the radix tree for the
  * zone.
  */
-static struct mem_zone_bm_rtree *
-create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
-		     struct chain_allocator *ca,
-		     unsigned long start, unsigned long end)
+static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
+						      int safe_needed,
+						      struct chain_allocator *ca,
+						      unsigned long start,
+						      unsigned long end)
 {
 	struct mem_zone_bm_rtree *zone;
 	unsigned int i, nr_blocks;
@@ -454,8 +524,8 @@ create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
 	return zone;
 }
 
-/*
- * free_zone_bm_rtree - Free the memory of the radix tree
+/**
+ * free_zone_bm_rtree - Free the memory of the radix tree.
  *
  * Free all node pages of the radix tree. The mem_zone_bm_rtree
  * structure itself is not freed here nor are the rtree_node
@@ -492,8 +562,8 @@ struct mem_extent {
 };
 
 /**
- * free_mem_extents - free a list of memory extents
- * @list - list of extents to empty
+ * free_mem_extents - Free a list of memory extents.
+ * @list: List of extents to free.
  */
 static void free_mem_extents(struct list_head *list)
 {
@@ -506,10 +576,11 @@ static void free_mem_extents(struct list_head *list)
 }
 
 /**
- * create_mem_extents - create a list of memory extents representing
- *		contiguous ranges of PFNs
- * @list - list to put the extents into
- * @gfp_mask - mask to use for memory allocations
+ * create_mem_extents - Create a list of memory extents.
+ * @list: List to put the extents into.
+ * @gfp_mask: Mask to use for memory allocations.
+ *
+ * The extents represent contiguous ranges of PFNs.
  */
 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 {
@@ -565,10 +636,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 }
 /**
- * memory_bm_create - allocate memory for a memory bitmap
+ * memory_bm_create - Allocate memory for a memory bitmap.
  */
-static int
-memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
+static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
+			    int safe_needed)
 {
 	struct chain_allocator ca;
 	struct list_head mem_extents;
@@ -607,7 +678,8 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 }
 
 /**
- * memory_bm_free - free memory occupied by the memory bitmap @bm
+ * memory_bm_free - Free memory occupied by the memory bitmap.
+ * @bm: Memory bitmap.
  */
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
@@ -622,14 +694,13 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 }
 /**
- * memory_bm_find_bit - Find the bit for pfn in the memory
- *		bitmap
+ * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
  *
- * Find the bit in the bitmap @bm that corresponds to given pfn.
- * The cur.zone, cur.block and cur.node_pfn member of @bm are
- * updated.
- * It walks the radix tree to find the page which contains the bit for
- * pfn and returns the bit position in **addr and *bit_nr.
+ * Find the bit in memory bitmap @bm that corresponds to the given PFN.
+ * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
+ *
+ * Walk the radix tree to find the page containing the bit that represents @pfn
+ * and return the position of the bit in @addr and @bit_nr.
  */
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 			      void **addr, unsigned int *bit_nr)
@@ -658,10 +729,9 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 zone_found:
 	/*
-	 * We have a zone. Now walk the radix tree to find the leave
-	 * node for our pfn.
+	 * We have found the zone. Now walk the radix tree to find the leaf node
+	 * for our PFN.
 	 */
-
 	node = bm->cur.node;
 	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
 		goto node_found;
@@ -754,14 +824,14 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 }
 
 /*
- * rtree_next_node - Jumps to the next leave node
+ * rtree_next_node - Jump to the next leaf node.
  *
- * Sets the position to the beginning of the next node in the
+ * Set the position to the beginning of the next node in the
  * memory bitmap. This is either the next node in the current
  * zone's radix tree or the first node in the radix tree of the
  * next zone.
  *
- * Returns true if there is a next node, false otherwise.
+ * Return true if there is a next node, false otherwise.
  */
 static bool rtree_next_node(struct memory_bitmap *bm)
 {
@@ -790,14 +860,15 @@ static bool rtree_next_node(struct memory_bitmap *bm)
 }
 /**
- * memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm
+ * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
+ * @bm: Memory bitmap.
  *
- * Starting from the last returned position this function searches
- * for the next set bit in the memory bitmap and returns its
- * number. If no more bit is set BM_END_OF_MAP is returned.
+ * Starting from the last returned position this function searches for the next
+ * set bit in @bm and returns the PFN represented by it. If no more bits are
+ * set, BM_END_OF_MAP is returned.
  *
- * It is required to run memory_bm_position_reset() before the
- * first call to this function.
+ * It is required to run memory_bm_position_reset() before the first call to
+ * this function for the given memory bitmap.
  */
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
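The canonical consumer of this interface iterates as below — the same reset-then-next pattern used elsewhere in snapshot.c (e.g. in copy_data_pages()); handle_pfn() is a placeholder for the per-page work:

	static void walk_set_bits(struct memory_bitmap *bm)
	{
		unsigned long pfn;

		memory_bm_position_reset(bm);	/* required before first call */
		for (pfn = memory_bm_next_pfn(bm);
		     pfn != BM_END_OF_MAP;
		     pfn = memory_bm_next_pfn(bm))
			handle_pfn(pfn);	/* placeholder per-page work */
	}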
@@ -819,11 +890,10 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 	return BM_END_OF_MAP;
 }
 
-/**
+/*
  * This structure represents a range of page frames the contents of which
- * should not be saved during the suspend.
+ * should not be saved during hibernation.
  */
-
 struct nosave_region {
 	struct list_head list;
 	unsigned long start_pfn;
@@ -832,15 +902,42 @@ struct nosave_region {
 
 static LIST_HEAD(nosave_regions);
+static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		recycle_safe_page(node->data);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		recycle_safe_page(node->data);
+}
+
+static void memory_bm_recycle(struct memory_bitmap *bm)
+{
+	struct mem_zone_bm_rtree *zone;
+	struct linked_page *p_list;
+
+	list_for_each_entry(zone, &bm->zones, list)
+		recycle_zone_bm_rtree(zone);
+
+	p_list = bm->p_list;
+	while (p_list) {
+		struct linked_page *lp = p_list;
+
+		p_list = lp->next;
+		recycle_safe_page(lp);
+	}
+}
+
 /**
- * register_nosave_region - register a range of page frames the contents
- * of which should not be saved during the suspend (to be used in the early
- * initialization code)
+ * register_nosave_region - Register a region of unsaveable memory.
+ *
+ * Register a range of page frames the contents of which should not be saved
+ * during hibernation (to be used in the early initialization code).
  */
-void __init
-__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
-			 int use_kmalloc)
+void __init __register_nosave_region(unsigned long start_pfn,
+				     unsigned long end_pfn, int use_kmalloc)
 {
 	struct nosave_region *region;
@@ -857,12 +954,13 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
 		}
 	}
 	if (use_kmalloc) {
-		/* during init, this shouldn't fail */
+		/* During init, this shouldn't fail */
 		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
 		BUG_ON(!region);
-	} else
+	} else {
 		/* This allocation cannot fail */
 		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
+	}
 	region->start_pfn = start_pfn;
 	region->end_pfn = end_pfn;
 	list_add_tail(&region->list, &nosave_regions);
@@ -923,10 +1021,12 @@ static void swsusp_unset_page_forbidden(struct page *page)
 }
 
 /**
- * mark_nosave_pages - set bits corresponding to the page frames the
- * contents of which should not be saved in a given bitmap.
+ * mark_nosave_pages - Mark pages that should not be saved.
+ * @bm: Memory bitmap.
+ *
+ * Set the bits in @bm that correspond to the page frames the contents of which
+ * should not be saved.
  */
 static void mark_nosave_pages(struct memory_bitmap *bm)
 {
 	struct nosave_region *region;
@@ -956,13 +1056,13 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
 }
 
 /**
- * create_basic_memory_bitmaps - create bitmaps needed for marking page
- * frames that should not be saved and free page frames. The pointers
- * forbidden_pages_map and free_pages_map are only modified if everything
- * goes well, because we don't want the bits to be used before both bitmaps
- * are set up.
+ * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
+ *
+ * Create bitmaps needed for marking page frames that should not be saved and
+ * free page frames. The forbidden_pages_map and free_pages_map pointers are
+ * only modified if everything goes well, because we don't want the bits to be
+ * touched before both bitmaps are set up.
  */
 int create_basic_memory_bitmaps(void)
 {
 	struct memory_bitmap *bm1, *bm2;
@@ -1007,12 +1107,12 @@ int create_basic_memory_bitmaps(void)
 }
 
 /**
- * free_basic_memory_bitmaps - free memory bitmaps allocated by
- * create_basic_memory_bitmaps(). The auxiliary pointers are necessary
- * so that the bitmaps themselves are not referred to while they are being
- * freed.
+ * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
+ *
+ * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
+ * auxiliary pointers are necessary so that the bitmaps themselves are not
+ * referred to while they are being freed.
  */
 void free_basic_memory_bitmaps(void)
 {
 	struct memory_bitmap *bm1, *bm2;
@@ -1033,11 +1133,13 @@ void free_basic_memory_bitmaps(void)
 }
 
 /**
- * snapshot_additional_pages - estimate the number of additional pages
- * be needed for setting up the suspend image data structures for given
- * zone (usually the returned value is greater than the exact number)
+ * snapshot_additional_pages - Estimate the number of extra pages needed.
+ * @zone: Memory zone to carry out the computation for.
+ *
+ * Estimate the number of additional pages needed for setting up a hibernation
+ * image data structures for @zone (usually, the returned value is greater than
+ * the exact number).
  */
 unsigned int snapshot_additional_pages(struct zone *zone)
 {
 	unsigned int rtree, nodes;
@@ -1055,10 +1157,10 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 
 #ifdef CONFIG_HIGHMEM
 /**
- * count_free_highmem_pages - compute the total number of free highmem
- * pages, system-wide.
+ * count_free_highmem_pages - Compute the total number of free highmem pages.
+ *
+ * The returned number is system-wide.
  */
 static unsigned int count_free_highmem_pages(void)
 {
 	struct zone *zone;
@@ -1072,11 +1174,12 @@ static unsigned int count_free_highmem_pages(void)
 }
 
 /**
- * saveable_highmem_page - Determine whether a highmem page should be
- * included in the suspend image.
+ * saveable_highmem_page - Check if a highmem page is saveable.
+ *
+ * Determine whether a highmem page should be included in a hibernation image.
  *
  * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
- * and it isn't a part of a free chunk of pages.
+ * and it isn't part of a free chunk of pages.
  */
 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 {
@@ -1102,10 +1205,8 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 }
 
 /**
- * count_highmem_pages - compute the total number of saveable highmem
- * pages.
+ * count_highmem_pages - Compute the total number of saveable highmem pages.
  */
 static unsigned int count_highmem_pages(void)
 {
 	struct zone *zone;
@@ -1133,11 +1234,13 @@ static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
 #endif /* CONFIG_HIGHMEM */
 
 /**
- * saveable_page - Determine whether a non-highmem page should be included
- * in the suspend image.
+ * saveable_page - Check if the given page is saveable.
+ *
+ * Determine whether a non-highmem page should be included in a hibernation
+ * image.
  *
  * We should save the page if it isn't Nosave, and is not in the range
- * of pages statically defined as 'unsaveable', and it isn't a part of
+ * of pages statically defined as 'unsaveable', and it isn't part of
  * a free chunk of pages.
  */
 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
@@ -1167,10 +1270,8 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 }
 
 /**
- * count_data_pages - compute the total number of saveable non-highmem
- * pages.
+ * count_data_pages - Compute the total number of saveable non-highmem pages.
  */
 static unsigned int count_data_pages(void)
 {
 	struct zone *zone;
@@ -1190,7 +1291,8 @@ static unsigned int count_data_pages(void)
 	return n;
 }
-/* This is needed, because copy_page and memcpy are not usable for copying
+/*
+ * This is needed, because copy_page and memcpy are not usable for copying
  * task structs.
  */
 static inline void do_copy_page(long *dst, long *src)
@@ -1201,12 +1303,12 @@ static inline void do_copy_page(long *dst, long *src)
 		*dst++ = *src++;
 }
 
 /**
- * safe_copy_page - check if the page we are going to copy is marked as
- * present in the kernel page tables (this always is the case if
- * CONFIG_DEBUG_PAGEALLOC is not set and in that case
- * kernel_page_present() always returns 'true').
+ * safe_copy_page - Copy a page in a safe way.
+ *
+ * Check if the page we are going to copy is marked as present in the kernel
+ * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
+ * and in that case kernel_page_present() always returns 'true').
  */
 static void safe_copy_page(void *dst, struct page *s_page)
 {
@@ -1219,10 +1321,8 @@ static void safe_copy_page(void *dst, struct page *s_page)
 	}
 }
 #ifdef CONFIG_HIGHMEM
-static inline struct page *
-page_is_saveable(struct zone *zone, unsigned long pfn)
+static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
 {
 	return is_highmem(zone) ?
 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
@@ -1243,7 +1343,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 		kunmap_atomic(src);
 	} else {
 		if (PageHighMem(d_page)) {
-			/* Page pointed to by src may contain some kernel
+			/*
+			 * The page pointed to by src may contain some kernel
 			 * data modified by kmap_atomic()
 			 */
 			safe_copy_page(buffer, s_page);
@@ -1265,8 +1366,8 @@ static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 }
 #endif /* CONFIG_HIGHMEM */
 
-static void
-copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
+static void copy_data_pages(struct memory_bitmap *copy_bm,
+			    struct memory_bitmap *orig_bm)
 {
 	struct zone *zone;
 	unsigned long pfn;
@@ -1315,12 +1416,11 @@ static struct memory_bitmap orig_bm;
 static struct memory_bitmap copy_bm;
 
 /**
- * swsusp_free - free pages allocated for the suspend.
+ * swsusp_free - Free pages allocated for hibernation image.
  *
- * Suspend pages are alocated before the atomic copy is made, so we
- * need to release them after the resume.
+ * Image pages are alocated before snapshot creation, so they need to be
+ * released after resume.
  */
 void swsusp_free(void)
 {
 	unsigned long fb_pfn, fr_pfn;
@@ -1351,6 +1451,7 @@ void swsusp_free(void)
 
 		memory_bm_clear_current(forbidden_pages_map);
 		memory_bm_clear_current(free_pages_map);
+		hibernate_restore_unprotect_page(page_address(page));
 		__free_page(page);
 		goto loop;
 	}
@@ -1362,6 +1463,7 @@ void swsusp_free(void)
 	buffer = NULL;
 	alloc_normal = 0;
 	alloc_highmem = 0;
+	hibernate_restore_protection_end();
 }
 /* Helper functions used for the shrinking of memory. */
 
@@ -1369,7 +1471,7 @@ void swsusp_free(void)
 #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
 
 /**
- * preallocate_image_pages - Allocate a number of pages for hibernation image
+ * preallocate_image_pages - Allocate a number of pages for hibernation image.
  * @nr_pages: Number of page frames to allocate.
  * @mask: GFP flags to use for the allocation.
  *
@@ -1419,7 +1521,7 @@ static unsigned long preallocate_image_highmem(unsigned long nr_pages)
 }
 
 /**
- * __fraction - Compute (an approximation of) x * (multiplier / base)
+ * __fraction - Compute (an approximation of) x * (multiplier / base).
  */
 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
 {
@@ -1451,7 +1553,7 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
 #endif /* CONFIG_HIGHMEM */
 
 /**
- * free_unnecessary_pages - Release preallocated pages not needed for the image
+ * free_unnecessary_pages - Release preallocated pages not needed for the image.
  */
 static unsigned long free_unnecessary_pages(void)
 {
@@ -1505,7 +1607,7 @@ static unsigned long free_unnecessary_pages(void)
 }
 
 /**
- * minimum_image_size - Estimate the minimum acceptable size of an image
+ * minimum_image_size - Estimate the minimum acceptable size of an image.
  * @saveable: Number of saveable pages in the system.
  *
  * We want to avoid attempting to free too much memory too hard, so estimate the
@@ -1535,7 +1637,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 }
 
 /**
- * hibernate_preallocate_memory - Preallocate memory for hibernation image
+ * hibernate_preallocate_memory - Preallocate memory for hibernation image.
  *
  * To create a hibernation image it is necessary to make a copy of every page
  * frame in use. We also need a number of page frames to be free during
@@ -1708,10 +1810,11 @@ int hibernate_preallocate_memory(void)
 
 #ifdef CONFIG_HIGHMEM
 /**
- * count_pages_for_highmem - compute the number of non-highmem pages
- * that will be necessary for creating copies of highmem pages.
+ * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
+ *
+ * Compute the number of non-highmem pages that will be necessary for creating
+ * copies of highmem pages.
  */
 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
 {
 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
@@ -1724,15 +1827,12 @@ static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
 	return nr_highmem;
 }
 #else
-static unsigned int
-count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
+static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
 #endif /* CONFIG_HIGHMEM */
 
 /**
- * enough_free_mem - Make sure we have enough free memory for the
- * snapshot image.
+ * enough_free_mem - Check if there is enough free memory for the image.
  */
 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 {
 	struct zone *zone;
@@ -1751,10 +1851,11 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 
 #ifdef CONFIG_HIGHMEM
 /**
- * get_highmem_buffer - if there are some highmem pages in the suspend
- * image, we may need the buffer to copy them and/or load their data.
+ * get_highmem_buffer - Allocate a buffer for highmem pages.
+ *
+ * If there are some highmem pages in the hibernation image, we may need a
+ * buffer to copy them and/or load their data.
  */
 static inline int get_highmem_buffer(int safe_needed)
 {
 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
@@ -1762,13 +1863,13 @@ static inline int get_highmem_buffer(int safe_needed)
 }
 
 /**
- * alloc_highmem_image_pages - allocate some highmem pages for the image.
- * Try to allocate as many pages as needed, but if the number of free
- * highmem pages is lesser than that, allocate them all.
+ * alloc_highmem_image_pages - Allocate some highmem pages for the image.
+ *
+ * Try to allocate as many pages as needed, but if the number of free highmem
+ * pages is less than that, allocate them all.
  */
-static inline unsigned int
-alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
+static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
+					       unsigned int nr_highmem)
 {
 	unsigned int to_alloc = count_free_highmem_pages();
@@ -1787,12 +1888,12 @@ alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
 #else
 static inline int get_highmem_buffer(int safe_needed) { return 0; }
 
-static inline unsigned int
-alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
+static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
+					       unsigned int n) { return 0; }
 #endif /* CONFIG_HIGHMEM */
 
 /**
- * swsusp_alloc - allocate memory for the suspend image
+ * swsusp_alloc - Allocate memory for hibernation image.
  *
  * We first try to allocate as many highmem pages as there are
  * saveable highmem pages in the system. If that fails, we allocate
@@ -1802,9 +1903,8 @@ alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
  * also be located in the high memory, because of the way in which
  * copy_data_pages() works.
  */
-static int
-swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
+static int swsusp_alloc(struct memory_bitmap *orig_bm,
+			struct memory_bitmap *copy_bm,
 		unsigned int nr_pages, unsigned int nr_highmem)
 {
 	if (nr_highmem > 0) {
...@@ -1855,7 +1955,8 @@ asmlinkage __visible int swsusp_save(void) ...@@ -1855,7 +1955,8 @@ asmlinkage __visible int swsusp_save(void)
return -ENOMEM; return -ENOMEM;
} }
/* During allocating of suspend pagedir, new cold pages may appear. /*
* During allocating of suspend pagedir, new cold pages may appear.
* Kill them. * Kill them.
*/ */
drain_local_pages(NULL); drain_local_pages(NULL);
@@ -1918,12 +2019,14 @@ static int init_header(struct swsusp_info *info)
 }
 
 /**
- * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
- * are stored in the array @buf[] (1 page at a time)
+ * pack_pfns - Prepare PFNs for saving.
+ * @bm: Memory bitmap.
+ * @buf: Memory buffer to store the PFNs in.
+ *
+ * PFNs corresponding to set bits in @bm are stored in the area of memory
+ * pointed to by @buf (1 page at a time).
  */
-static inline void
-pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
 {
 	int j;
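The packed layout is easy to picture: each meta page is simply an array of PFN-sized slots. A minimal sketch of the arithmetic (hypothetical helper, assuming 4 KB pages and 64-bit unsigned long; not part of the commit):

    /*
     * Illustrative sketch only: each meta page written by pack_pfns()
     * holds PAGE_SIZE / sizeof(unsigned long) PFN slots, i.e.
     * 4096 / 8 = 512 entries on a 64-bit system with 4 KB pages.
     */
    #define PFNS_PER_META_PAGE	(PAGE_SIZE / sizeof(unsigned long))

    static unsigned int meta_pages_needed(unsigned int nr_data_pages)
    {
    	/* one PFN slot per saveable data page */
    	return DIV_ROUND_UP(nr_data_pages, PFNS_PER_META_PAGE);
    }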
@@ -1937,22 +2040,21 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
 }
 
 /**
- * snapshot_read_next - used for reading the system memory snapshot.
+ * snapshot_read_next - Get the address to read the next image page from.
+ * @handle: Snapshot handle to be used for the reading.
  *
- * On the first call to it @handle should point to a zeroed
- * snapshot_handle structure. The structure gets updated and a pointer
- * to it should be passed to this function every next time.
+ * On the first call, @handle should point to a zeroed snapshot_handle
+ * structure. The structure gets populated then and a pointer to it should be
+ * passed to this function every next time.
  *
- * On success the function returns a positive number. Then, the caller
+ * On success, the function returns a positive number. Then, the caller
  * is allowed to read up to the returned number of bytes from the memory
  * location computed by the data_of() macro.
  *
- * The function returns 0 to indicate the end of data stream condition,
- * and a negative number is returned on error. In such cases the
- * structure pointed to by @handle is not updated and should not be used
- * any more.
+ * The function returns 0 to indicate the end of the data stream condition,
+ * and negative numbers are returned on errors. If that happens, the structure
+ * pointed to by @handle is not updated and should not be used any more.
  */
 int snapshot_read_next(struct snapshot_handle *handle)
 {
 	if (handle->cur > nr_meta_pages + nr_copy_pages)
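A minimal sketch of the read-side protocol just described (hypothetical caller; write_page_somewhere() is an invented sink, not a kernel function; not part of this commit):

    static int save_image_sketch(void)
    {
    	struct snapshot_handle handle;
    	int ret;

    	memset(&handle, 0, sizeof(handle));	/* zeroed on the first call */
    	while ((ret = snapshot_read_next(&handle)) > 0)
    		/* up to 'ret' bytes may be read from data_of(handle) */
    		write_page_somewhere(data_of(handle), ret);

    	return ret;	/* 0 at the end of the data stream, negative on error */
    }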
@@ -1981,7 +2083,8 @@ int snapshot_read_next(struct snapshot_handle *handle)
 
 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
 		if (PageHighMem(page)) {
-			/* Highmem pages are copied to the buffer,
+			/*
+			 * Highmem pages are copied to the buffer,
 			 * because we can't return with a kmapped
 			 * highmem page (we may not be called again).
 			 */
@@ -1999,53 +2102,41 @@ int snapshot_read_next(struct snapshot_handle *handle)
 	return PAGE_SIZE;
 }
 
-/**
- * mark_unsafe_pages - mark the pages that cannot be used for storing
- * the image during resume, because they conflict with the pages that
- * had been used before suspend
- */
-static int mark_unsafe_pages(struct memory_bitmap *bm)
+static void duplicate_memory_bitmap(struct memory_bitmap *dst,
+				    struct memory_bitmap *src)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long pfn;
 
-	/* Clear page flags */
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn))
-				swsusp_unset_page_free(pfn_to_page(pfn));
+	memory_bm_position_reset(src);
+	pfn = memory_bm_next_pfn(src);
+	while (pfn != BM_END_OF_MAP) {
+		memory_bm_set_bit(dst, pfn);
+		pfn = memory_bm_next_pfn(src);
 	}
-
-	/* Mark pages that correspond to the "original" pfns as "unsafe" */
-	memory_bm_position_reset(bm);
-	do {
-		pfn = memory_bm_next_pfn(bm);
-		if (likely(pfn != BM_END_OF_MAP)) {
-			if (likely(pfn_valid(pfn)))
-				swsusp_set_page_free(pfn_to_page(pfn));
-			else
-				return -EFAULT;
-		}
-	} while (pfn != BM_END_OF_MAP);
-
-	allocated_unsafe_pages = 0;
-
-	return 0;
 }
 
-static void
-duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
+/**
+ * mark_unsafe_pages - Mark pages that were used before hibernation.
+ *
+ * Mark the pages that cannot be used for storing the image during restoration,
+ * because they conflict with the pages that had been used before hibernation.
+ */
+static void mark_unsafe_pages(struct memory_bitmap *bm)
 {
 	unsigned long pfn;
 
-	memory_bm_position_reset(src);
-	pfn = memory_bm_next_pfn(src);
+	/* Clear the "free"/"unsafe" bit for all PFNs */
+	memory_bm_position_reset(free_pages_map);
+	pfn = memory_bm_next_pfn(free_pages_map);
 	while (pfn != BM_END_OF_MAP) {
-		memory_bm_set_bit(dst, pfn);
-		pfn = memory_bm_next_pfn(src);
+		memory_bm_clear_current(free_pages_map);
+		pfn = memory_bm_next_pfn(free_pages_map);
 	}
+
+	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
+	duplicate_memory_bitmap(free_pages_map, bm);
+
+	allocated_unsafe_pages = 0;
 }
 
 static int check_header(struct swsusp_info *info)
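Both helpers above lean on the same memory-bitmap iteration idiom: reset the cursor, then pull PFNs until BM_END_OF_MAP. A minimal sketch (hypothetical counting helper, not part of the commit):

    static unsigned int count_set_pfns_sketch(struct memory_bitmap *bm)
    {
    	unsigned long pfn;
    	unsigned int cnt = 0;

    	memory_bm_position_reset(bm);
    	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
    	     pfn = memory_bm_next_pfn(bm))
    		cnt++;	/* one set bit == one PFN of interest */

    	return cnt;
    }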
@@ -2063,11 +2154,9 @@ static int check_header(struct swsusp_info *info)
 }
 
 /**
- * load header - check the image header and copy data from it
+ * load_header - Check the image header and copy the data from it.
  */
-static int
-load_header(struct swsusp_info *info)
+static int load_header(struct swsusp_info *info)
 {
 	int error;
@@ -2081,8 +2170,12 @@ load_header(struct swsusp_info *info)
 }
 
 /**
- * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
- * the corresponding bit in the memory bitmap @bm
+ * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
+ * @bm: Memory bitmap.
+ * @buf: Area of memory containing the PFNs.
+ *
+ * For each element of the array pointed to by @buf (1 page at a time), set the
+ * corresponding bit in @bm.
  */
 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 {
@@ -2095,7 +2188,7 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 
 		/* Extract and buffer page key for data page (s390 only). */
 		page_key_memorize(buf + j);
 
-		if (memory_bm_pfn_present(bm, buf[j]))
+		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
 			memory_bm_set_bit(bm, buf[j]);
 		else
 			return -EFAULT;
@@ -2104,13 +2197,9 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 	return 0;
 }
 
-/* List of "safe" pages that may be used to store data loaded from the suspend
- * image
- */
-static struct linked_page *safe_pages_list;
-
 #ifdef CONFIG_HIGHMEM
-/* struct highmem_pbe is used for creating the list of highmem pages that
+/*
+ * struct highmem_pbe is used for creating the list of highmem pages that
  * should be restored atomically during the resume from disk, because the page
  * frames they have occupied before the suspend are in use.
  */
@@ -2120,7 +2209,8 @@ struct highmem_pbe {
 	struct highmem_pbe *next;
 };
 
-/* List of highmem PBEs needed for restoring the highmem pages that were
+/*
+ * List of highmem PBEs needed for restoring the highmem pages that were
  * allocated before the suspend and included in the suspend image, but have
  * also been allocated by the "resume" kernel, so their contents cannot be
  * written directly to their "original" page frames.
@@ -2128,11 +2218,11 @@ struct highmem_pbe {
 static struct highmem_pbe *highmem_pblist;
 
 /**
- * count_highmem_image_pages - compute the number of highmem pages in the
- * suspend image. The bits in the memory bitmap @bm that correspond to the
- * image pages are assumed to be set.
+ * count_highmem_image_pages - Compute the number of highmem pages in the image.
+ * @bm: Memory bitmap.
+ *
+ * The bits in @bm that correspond to image pages are assumed to be set.
  */
 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
 {
 	unsigned long pfn;
@@ -2149,24 +2239,25 @@ static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
 	return cnt;
 }
 
-/**
- * prepare_highmem_image - try to allocate as many highmem pages as
- * there are highmem image pages (@nr_highmem_p points to the variable
- * containing the number of highmem image pages). The pages that are
- * "safe" (ie. will not be overwritten when the suspend image is
- * restored) have the corresponding bits set in @bm (it must be
- * unitialized).
- *
- * NOTE: This function should not be called if there are no highmem
- * image pages.
- */
-
 static unsigned int safe_highmem_pages;
 static struct memory_bitmap *safe_highmem_bm;
 
-static int
-prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+/**
+ * prepare_highmem_image - Allocate memory for loading highmem data from image.
+ * @bm: Pointer to an uninitialized memory bitmap structure.
+ * @nr_highmem_p: Pointer to the number of highmem image pages.
+ *
+ * Try to allocate as many highmem pages as there are highmem image pages
+ * (@nr_highmem_p points to the variable containing the number of highmem image
+ * pages). The pages that are "safe" (ie. will not be overwritten when the
+ * hibernation image is restored entirely) have the corresponding bits set in
+ * @bm (it must be uninitialized).
+ *
+ * NOTE: This function should not be called if there are no highmem image pages.
+ */
+static int prepare_highmem_image(struct memory_bitmap *bm,
+				 unsigned int *nr_highmem_p)
 {
 	unsigned int to_alloc;
@@ -2201,9 +2292,13 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
 	return 0;
 }
 
+static struct page *last_highmem_page;
+
 /**
- * get_highmem_page_buffer - for given highmem image page find the buffer
- * that suspend_write_next() should set for its caller to write to.
+ * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
+ *
+ * For a given highmem image page get a buffer that suspend_write_next() should
+ * return to its caller to write to.
  *
  * If the page is to be saved to its "original" page frame or a copy of
  * the page is to be made in the highmem, @buffer is returned. Otherwise,
@@ -2214,26 +2309,25 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
  * the page's contents to @buffer, so they will have to be copied to the
  * right location on the next call to suspend_write_next() and it is done
  * with the help of copy_last_highmem_page(). For this purpose, if
- * @buffer is returned, @last_highmem page is set to the page to which
+ * @buffer is returned, @last_highmem_page is set to the page to which
  * the data will have to be copied from @buffer.
  */
-
-static struct page *last_highmem_page;
-
-static void *
-get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+static void *get_highmem_page_buffer(struct page *page,
+				     struct chain_allocator *ca)
 {
 	struct highmem_pbe *pbe;
 	void *kaddr;
 
 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
-		/* We have allocated the "original" page frame and we can
+		/*
+		 * We have allocated the "original" page frame and we can
 		 * use it directly to store the loaded page.
 		 */
 		last_highmem_page = page;
 		return buffer;
 	}
 
-	/* The "original" page frame has not been allocated and we have to
+	/*
+	 * The "original" page frame has not been allocated and we have to
 	 * use a "safe" page frame to store the loaded page.
 	 */
 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
@@ -2263,11 +2357,12 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
 }
 
 /**
- * copy_last_highmem_page - copy the contents of a highmem image from
- * @buffer, where the caller of snapshot_write_next() has place them,
- * to the right location represented by @last_highmem_page .
+ * copy_last_highmem_page - Copy the most recent highmem image page.
+ *
+ * Copy the contents of a highmem image from @buffer, where the caller of
+ * snapshot_write_next() has stored them, to the right location represented by
+ * @last_highmem_page.
  */
 static void copy_last_highmem_page(void)
 {
 	if (last_highmem_page) {
@@ -2294,17 +2389,13 @@ static inline void free_highmem_data(void)
 		free_image_page(buffer, PG_UNSAFE_CLEAR);
 }
 #else
-static unsigned int
-count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
+static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
 
-static inline int
-prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
-{
-	return 0;
-}
+static inline int prepare_highmem_image(struct memory_bitmap *bm,
+					unsigned int *nr_highmem_p) { return 0; }
 
-static inline void *
-get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+static inline void *get_highmem_page_buffer(struct page *page,
+					    struct chain_allocator *ca)
 {
 	return ERR_PTR(-EINVAL);
 }
@@ -2314,27 +2405,27 @@ static inline int last_highmem_page_copied(void) { return 1; }
 static inline void free_highmem_data(void) {}
 #endif /* CONFIG_HIGHMEM */
 
+#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
+
 /**
- * prepare_image - use the memory bitmap @bm to mark the pages that will
- * be overwritten in the process of restoring the system memory state
- * from the suspend image ("unsafe" pages) and allocate memory for the
- * image.
+ * prepare_image - Make room for loading hibernation image.
+ * @new_bm: Uninitialized memory bitmap structure.
+ * @bm: Memory bitmap with unsafe pages marked.
+ *
+ * Use @bm to mark the pages that will be overwritten in the process of
+ * restoring the system memory state from the suspend image ("unsafe" pages)
+ * and allocate memory for the image.
  *
  * The idea is to allocate a new memory bitmap first and then allocate
- * as many pages as needed for the image data, but not to assign these
- * pages to specific tasks initially. Instead, we just mark them as
- * allocated and create a lists of "safe" pages that will be used
- * later. On systems with high memory a list of "safe" highmem pages is
- * also created.
+ * as many pages as needed for image data, but without specifying what those
+ * pages will be used for just yet. Instead, we mark them all as allocated and
+ * create a list of "safe" pages to be used later. On systems with high
+ * memory a list of "safe" highmem pages is created too.
  */
-
-#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
-
-static int
-prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
+static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
 	unsigned int nr_pages, nr_highmem;
-	struct linked_page *sp_list, *lp;
+	struct linked_page *lp;
 	int error;
 
 	/* If there is no highmem, the buffer will not be necessary */
@@ -2342,9 +2433,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 	buffer = NULL;
 
 	nr_highmem = count_highmem_image_pages(bm);
-	error = mark_unsafe_pages(bm);
-	if (error)
-		goto Free;
+	mark_unsafe_pages(bm);
 
 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
 	if (error)
@@ -2357,14 +2446,15 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 		if (error)
 			goto Free;
 	}
-	/* Reserve some safe pages for potential later use.
+	/*
+	 * Reserve some safe pages for potential later use.
 	 *
 	 * NOTE: This way we make sure there will be enough safe pages for the
 	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
+	 *
+	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
 	 */
-	sp_list = NULL;
-	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
 	while (nr_pages > 0) {
@@ -2373,12 +2463,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 			error = -ENOMEM;
 			goto Free;
 		}
-		lp->next = sp_list;
-		sp_list = lp;
+		lp->next = safe_pages_list;
+		safe_pages_list = lp;
 		nr_pages--;
 	}
 	/* Preallocate memory for the image */
-	safe_pages_list = NULL;
 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	while (nr_pages > 0) {
 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
@@ -2396,12 +2485,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 		swsusp_set_page_free(virt_to_page(lp));
 		nr_pages--;
 	}
-	/* Free the reserved safe pages so that chain_alloc() can use them */
-	while (sp_list) {
-		lp = sp_list->next;
-		free_image_page(sp_list, PG_UNSAFE_CLEAR);
-		sp_list = lp;
-	}
 	return 0;
 
  Free:
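The size of the reservation made above is easy to estimate by hand; a worked example under stated assumptions (illustrative numbers, not from the commit):

    /*
     * struct pbe is three pointers (24 bytes on 64-bit), and
     * LINKED_PAGE_DATA_SIZE is one pointer short of PAGE_SIZE
     * (4088 bytes with 4 KB pages), so PBES_PER_LINKED_PAGE is
     * 4088 / 24 = 170.  Restoring 100000 unsafe data pages then
     * reserves DIV_ROUND_UP(100000, 170) = 589 linked pages for
     * the chain_alloc() calls in get_buffer().
     */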
@@ -2410,10 +2493,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 }
 
 /**
- * get_buffer - compute the address that snapshot_write_next() should
- * set for its caller to write to.
+ * get_buffer - Get the address to store the next image data page.
+ *
+ * Get the address that snapshot_write_next() should return to its caller to
+ * write to.
  */
 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 {
 	struct pbe *pbe;
@@ -2428,12 +2512,14 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 		return get_highmem_page_buffer(page, ca);
 
 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
-		/* We have allocated the "original" page frame and we can
+		/*
+		 * We have allocated the "original" page frame and we can
 		 * use it directly to store the loaded page.
 		 */
 		return page_address(page);
 
-	/* The "original" page frame has not been allocated and we have to
+	/*
+	 * The "original" page frame has not been allocated and we have to
 	 * use a "safe" page frame to store the loaded page.
 	 */
 	pbe = chain_alloc(ca, sizeof(struct pbe));
@@ -2450,22 +2536,21 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 }
 
 /**
- * snapshot_write_next - used for writing the system memory snapshot.
+ * snapshot_write_next - Get the address to store the next image page.
+ * @handle: Snapshot handle structure to guide the writing.
  *
- * On the first call to it @handle should point to a zeroed
- * snapshot_handle structure. The structure gets updated and a pointer
- * to it should be passed to this function every next time.
+ * On the first call, @handle should point to a zeroed snapshot_handle
+ * structure. The structure gets populated then and a pointer to it should be
+ * passed to this function every next time.
  *
- * On success the function returns a positive number. Then, the caller
+ * On success, the function returns a positive number. Then, the caller
  * is allowed to write up to the returned number of bytes to the memory
  * location computed by the data_of() macro.
  *
- * The function returns 0 to indicate the "end of file" condition,
- * and a negative number is returned on error. In such cases the
- * structure pointed to by @handle is not updated and should not be used
- * any more.
+ * The function returns 0 to indicate the "end of file" condition. Negative
+ * numbers are returned on errors, in which cases the structure pointed to by
+ * @handle is not updated and should not be used any more.
  */
 int snapshot_write_next(struct snapshot_handle *handle)
 {
 	static struct chain_allocator ca;
@@ -2491,6 +2576,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		if (error)
 			return error;
 
+		safe_pages_list = NULL;
+
 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
 		if (error)
 			return error;
@@ -2500,6 +2587,7 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		if (error)
 			return error;
 
+		hibernate_restore_protection_begin();
 	} else if (handle->cur <= nr_meta_pages + 1) {
 		error = unpack_orig_pfns(buffer, &copy_bm);
 		if (error)
@@ -2522,6 +2610,7 @@ int snapshot_write_next(struct snapshot_handle *handle)
 			copy_last_highmem_page();
 			/* Restore page key for data page (s390 only). */
 			page_key_write(handle->buffer);
+			hibernate_restore_protect_page(handle->buffer);
 			handle->buffer = get_buffer(&orig_bm, &ca);
 			if (IS_ERR(handle->buffer))
 				return PTR_ERR(handle->buffer);
@@ -2533,22 +2622,23 @@ int snapshot_write_next(struct snapshot_handle *handle)
 }
 
 /**
- * snapshot_write_finalize - must be called after the last call to
- * snapshot_write_next() in case the last page in the image happens
- * to be a highmem page and its contents should be stored in the
- * highmem. Additionally, it releases the memory that will not be
- * used any more.
+ * snapshot_write_finalize - Complete the loading of a hibernation image.
+ *
+ * Must be called after the last call to snapshot_write_next() in case the last
+ * page in the image happens to be a highmem page and its contents should be
+ * stored in highmem. Additionally, it recycles bitmap memory that's not
+ * necessary any more.
  */
 void snapshot_write_finalize(struct snapshot_handle *handle)
 {
 	copy_last_highmem_page();
 	/* Restore page key for data page (s390 only). */
 	page_key_write(handle->buffer);
 	page_key_free();
-	/* Free only if we have loaded the image entirely */
+	hibernate_restore_protect_page(handle->buffer);
+	/* Do that only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
-		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+		memory_bm_recycle(&orig_bm);
 		free_highmem_data();
 	}
 }
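The load side mirrors the read side; a minimal sketch of a restore loop driving the protocol above (hypothetical caller; read_page_from_somewhere() is an invented source, not a kernel function; not part of this commit):

    static int load_image_sketch(void)
    {
    	struct snapshot_handle handle;
    	int ret;

    	memset(&handle, 0, sizeof(handle));	/* zeroed on the first call */
    	while ((ret = snapshot_write_next(&handle)) > 0)
    		/* up to 'ret' bytes may be written to data_of(handle) */
    		read_page_from_somewhere(data_of(handle), ret);

    	if (ret)
    		return ret;			/* negative on error */

    	snapshot_write_finalize(&handle);
    	return snapshot_image_loaded(&handle) ? 0 : -ENODATA;
    }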
@@ -2561,8 +2651,8 @@ int snapshot_image_loaded(struct snapshot_handle *handle)
 
 #ifdef CONFIG_HIGHMEM
 /* Assumes that @buf is ready and points to a "safe" page */
-static inline void
-swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
+static inline void swap_two_pages_data(struct page *p1, struct page *p2,
+				       void *buf)
 {
 	void *kaddr1, *kaddr2;
@@ -2576,15 +2666,15 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 }
 
 /**
- * restore_highmem - for each highmem page that was allocated before
- * the suspend and included in the suspend image, and also has been
- * allocated by the "resume" kernel swap its current (ie. "before
- * resume") contents with the previous (ie. "before suspend") one.
+ * restore_highmem - Put highmem image pages into their original locations.
+ *
+ * For each highmem page that was in use before hibernation and is included in
+ * the image, and also has been allocated by the "restore" kernel, swap its
+ * current contents with the previous (ie. "before hibernation") ones.
  *
- * If the resume eventually fails, we can call this function once
- * again and restore the "before resume" highmem state.
+ * If the restore eventually fails, we can call this function once again and
+ * restore the highmem state as seen by the restore kernel.
  */
 int restore_highmem(void)
 {
 	struct highmem_pbe *pbe = highmem_pblist;
...
@@ -266,16 +266,18 @@ static int suspend_test(int level)
  */
 static int suspend_prepare(suspend_state_t state)
 {
-	int error;
+	int error, nr_calls = 0;
 
 	if (!sleep_state_supported(state))
 		return -EPERM;
 
 	pm_prepare_console();
 
-	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Finish;
+	}
 
 	trace_suspend_resume(TPS("freeze_processes"), 0, true);
 	error = suspend_freeze_processes();
@@ -286,7 +288,7 @@ static int suspend_prepare(suspend_state_t state)
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
 
  Finish:
-	pm_notifier_call_chain(PM_POST_SUSPEND);
+	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
 	pm_restore_console();
 	return error;
 }
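The nr_calls bookkeeping is what makes the PREPARE/POST notifications symmetric. An illustration (not from the commit) with three hypothetical registered notifiers:

    /*
     * Suppose notifiers A, B and C are registered and B fails during
     * PM_SUSPEND_PREPARE:
     *
     *   __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
     *		A: ok, B: fails -> nr_calls == 2 (A and B were invoked)
     *   nr_calls--;	// B failed, so it must not see the POST event
     *   __pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
     *		only A is notified; C never saw PREPARE and is skipped
     *
     * Here -1 means "no limit on how many callbacks to invoke", and a
     * non-NULL last argument returns how many callbacks actually ran.
     */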
...
@@ -348,6 +348,12 @@ static int swsusp_swap_check(void)
 	if (res < 0)
 		blkdev_put(hib_resume_bdev, FMODE_WRITE);
 
+	/*
+	 * Update the resume device to the one actually used,
+	 * so the test_resume mode can use it in case it is
+	 * invoked from hibernate() to test the snapshot.
+	 */
+	swsusp_resume_device = hib_resume_bdev->bd_dev;
 	return res;
 }
...
@@ -47,7 +47,7 @@ atomic_t snapshot_device_available = ATOMIC_INIT(1);
 static int snapshot_open(struct inode *inode, struct file *filp)
 {
 	struct snapshot_data *data;
-	int error;
+	int error, nr_calls = 0;
 
 	if (!hibernation_available())
 		return -EPERM;
@@ -74,9 +74,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
 		data->mode = O_RDONLY;
 		data->free_bitmaps = false;
-		error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+		error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
 		if (error)
-			pm_notifier_call_chain(PM_POST_HIBERNATION);
+			__pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
 	} else {
 		/*
 		 * Resuming.  We may need to wait for the image device to
@@ -86,13 +86,15 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		data->swap = -1;
 		data->mode = O_WRONLY;
-		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+		error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
 		if (!error) {
 			error = create_basic_memory_bitmaps();
 			data->free_bitmaps = !error;
-		}
+		} else
+			nr_calls--;
+
 		if (error)
-			pm_notifier_call_chain(PM_POST_RESTORE);
+			__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
 	}
 	if (error)
 		atomic_inc(&snapshot_device_available);
...
@@ -4369,8 +4369,8 @@ static void show_pwq(struct pool_workqueue *pwq)
 /**
  * show_workqueue_state - dump workqueue state
  *
- * Called from a sysrq handler and prints out all busy workqueues and
- * pools.
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
  */
 void show_workqueue_state(void)
 {
...
[One file's diff is too large to display here.]
 CC = $(CROSS_COMPILE)gcc
 BUILD_OUTPUT := $(CURDIR)
-PREFIX := /usr
-DESTDIR :=
+PREFIX ?= /usr
+DESTDIR ?=
 
 ifeq ("$(origin O)", "command line")
 	BUILD_OUTPUT := $(O)
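With "?=" the two variables become defaults that only apply when nothing else sets them, so a packager can stage the install without patching the Makefile, e.g. with an invocation along the lines of 'make DESTDIR=/tmp/stage PREFIX=/usr/local install' (illustrative paths).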
...