Commit 3cc8a5f4 authored Jan 09, 2009 by Len Brown

Merge branch 'suspend' into release

parents d0302bc6 ada9cfdd

Showing 9 changed files with 446 additions and 201 deletions
Documentation/kernel-parameters.txt    +26  -19
arch/x86/kernel/acpi/sleep.c            +2   -0
arch/x86/kernel/e820.c                 +21   -0
drivers/acpi/main.c                    +59   -8
include/linux/acpi.h                    +1   -0
include/linux/suspend.h                +13   -0
kernel/power/disk.c                     +3   -3
kernel/power/snapshot.c               +199 -171
kernel/power/swsusp.c                 +122   -0
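Taken together, the diffs below add a mechanism for saving and restoring the ACPI NVS (non-volatile sleep) memory across hibernation: the x86 boot code registers every E820_NVS range (arch/x86/kernel/e820.c), kernel/power/swsusp.c provides the hibernate_nvs_register/alloc/save/restore/free helpers, and the ACPI hibernation callbacks in drivers/acpi/main.c invoke them around the snapshot, gated by the new acpi_sleep=s4_nonvs command-line option. The remaining kernel/power/snapshot.c changes rework the memory bitmaps to use list_head-based block lists so that overlapping zones are handled correctly, and switch several buffer-returning helpers to the ERR_PTR convention.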
Documentation/kernel-parameters.txt

@@ -150,16 +150,20 @@ and is between 256 and 4096 characters. It is defined in the file
 			default: 0

 	acpi_sleep=	[HW,ACPI] Sleep options
-			Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering }
-			See Documentation/power/video.txt for s3_bios and s3_mode.
+			Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
+				  old_ordering, s4_nonvs }
+			See Documentation/power/video.txt for information on
+			s3_bios and s3_mode.
 			s3_beep is for debugging; it makes the PC's speaker beep
 			as soon as the kernel's real-mode entry point is called.
 			s4_nohwsig prevents ACPI hardware signature from being
 			used during resume from hibernation.
 			old_ordering causes the ACPI 1.0 ordering of the _PTS
-			control method, wrt putting devices into low power
-			states, to be enforced (the ACPI 2.0 ordering of _PTS is
-			used by default).
+			control method, with respect to putting devices into
+			low power states, to be enforced (the ACPI 2.0 ordering
+			of _PTS is used by default).
+			s4_nonvs prevents the kernel from saving/restoring the
+			ACPI NVS memory during hibernation.

 	acpi_sci=	[HW,ACPI] ACPI System Control Interrupt trigger mode
 			Format: { level | edge | high | low }
...
@@ -194,7 +198,7 @@ and is between 256 and 4096 characters. It is defined in the file
 	acpi_skip_timer_override [HW,ACPI]
 			Recognize and ignore IRQ0/pin2 Interrupt Override.
 			For broken nForce2 BIOS resulting in XT-PIC timer.
-	acpi_use_timer_override [HW,ACPI}
+	acpi_use_timer_override [HW,ACPI]
 			Use timer override. For some broken Nvidia NF5 boards
 			that require a timer override, but don't have HPET
...
@@ -861,17 +865,19 @@ and is between 256 and 4096 characters. It is defined in the file
 			See Documentation/ide/ide.txt.

 	idle=		[X86]
-			Format: idle=poll or idle=mwait, idle=halt, idle=nomwait
-			Poll forces a polling idle loop that can slightly improves the performance
-			of waking up a idle CPU, but will use a lot of power and make the system
-			run hot. Not recommended.
-			idle=mwait. On systems which support MONITOR/MWAIT but the kernel chose
-			to not use it because it doesn't save as much power as a normal idle
-			loop use the MONITOR/MWAIT idle loop anyways. Performance should be the same
-			as idle=poll.
-			idle=halt. Halt is forced to be used for CPU idle.
+			Format: idle=poll, idle=mwait, idle=halt, idle=nomwait
+			Poll forces a polling idle loop that can slightly
+			improve the performance of waking up a idle CPU, but
+			will use a lot of power and make the system run hot.
+			Not recommended.
+			idle=mwait: On systems which support MONITOR/MWAIT but
+			the kernel chose to not use it because it doesn't save
+			as much power as a normal idle loop, use the
+			MONITOR/MWAIT idle loop anyways. Performance should be
+			the same as idle=poll.
+			idle=halt: Halt is forced to be used for CPU idle.
 			In such case C2/C3 won't be used again.
-			idle=nomwait. Disable mwait for CPU C-states
+			idle=nomwait: Disable mwait for CPU C-states

 	ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
 			Claim all unknown PCI IDE storage controllers.
...
@@ -1053,8 +1059,8 @@ and is between 256 and 4096 characters. It is defined in the file
 	lapic		[X86-32,APIC] Enable the local APIC even if BIOS
 			disabled it.

-	lapic_timer_c2_ok	[X86-32,x86-64,APIC] trust the local apic timer in
-			C2 power state.
+	lapic_timer_c2_ok	[X86-32,x86-64,APIC] trust the local apic timer
+			in C2 power state.

 	libata.dma=	[LIBATA] DMA control
 			libata.dma=0	  Disable all PATA and SATA DMA
...
@@ -2242,7 +2248,8 @@ and is between 256 and 4096 characters. It is defined in the file
 	thermal.psv=	[HW,ACPI]
 			-1: disable all passive trip points
-			<degrees C>: override all passive trip points to this value
+			<degrees C>: override all passive trip points to this
+			value

 	thermal.tzp=	[HW,ACPI]
 			Specify global default ACPI thermal zone polling rate
...
...
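The new s4_nonvs token is parsed by acpi_sleep_setup() (see arch/x86/kernel/acpi/sleep.c below), so it can be combined with the other acpi_sleep options on the kernel command line, for example (a hypothetical boot line, not from this commit):

	acpi_sleep=s3_bios,s4_nonvs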
arch/x86/kernel/acpi/sleep.c

@@ -159,6 +159,8 @@ static int __init acpi_sleep_setup(char *str)
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
...
...
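The option parser touched above walks a comma-separated string with strncmp/strchr/strspn. A standalone user-space sketch of the same idiom (illustrative only, not kernel code):

#include <stdio.h>
#include <string.h>

/* Walk a comma-separated option string the way acpi_sleep_setup() does:
 * match known tokens at the current position, then advance past the next
 * comma and any ", \t" separator characters. */
static void parse_acpi_sleep(char *str)
{
	while (str && *str) {
		if (strncmp(str, "old_ordering", 12) == 0)
			printf("old_ordering selected\n");
		if (strncmp(str, "s4_nonvs", 8) == 0)
			printf("s4_nonvs selected\n");
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
}

int main(void)
{
	char opts[] = "old_ordering,s4_nonvs";

	parse_acpi_sleep(opts);
	return 0;
}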
arch/x86/kernel/e820.c

@@ -665,6 +665,27 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif

+#ifdef CONFIG_HIBERNATION
+/**
+ * Mark ACPI NVS memory region, so that we can save/restore it during
+ * hibernation and the subsequent resume.
+ */
+static int __init e820_mark_nvs_memory(void)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+
+		if (ei->type == E820_NVS)
+			hibernate_nvs_register(ei->addr, ei->size);
+	}
+
+	return 0;
+}
+core_initcall(e820_mark_nvs_memory);
+#endif
+
 /*
  * Early reserved memory areas.
  */
...
...
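Registration happens via core_initcall(), i.e. early in boot, so the firmware-reported E820_NVS ranges are on record long before hibernation can be attempted; the backup pages themselves are only allocated when hibernation actually begins (see drivers/acpi/main.c below).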
drivers/acpi/main.c

@@ -101,6 +101,19 @@ void __init acpi_old_suspend_ordering(void)
  * cases.
  */
 static bool set_sci_en_on_resume;
+/*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * and to restore them during the subsequent resume. However, it is not certain
+ * if this mechanism is going to work on all machines, so we allow the user to
+ * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
+ * option.
+ */
+static bool s4_no_nvs;
+
+void __init acpi_s4_no_nvs(void)
+{
+	s4_no_nvs = true;
+}

 /**
  *	acpi_pm_disable_gpes - Disable the GPEs.
...
@@ -394,9 +407,25 @@ void __init acpi_no_s4_hw_signature(void)

 static int acpi_hibernation_begin(void)
 {
-	acpi_target_sleep_state = ACPI_STATE_S4;
-	acpi_sleep_tts_switch(acpi_target_sleep_state);
-	return 0;
+	int error;
+
+	error = s4_no_nvs ? 0 : hibernate_nvs_alloc();
+	if (!error) {
+		acpi_target_sleep_state = ACPI_STATE_S4;
+		acpi_sleep_tts_switch(acpi_target_sleep_state);
+	}
+
+	return error;
+}
+
+static int acpi_hibernation_pre_snapshot(void)
+{
+	int error = acpi_pm_prepare();
+
+	if (!error)
+		hibernate_nvs_save();
+
+	return error;
 }

 static int acpi_hibernation_enter(void)
...
@@ -417,6 +446,12 @@ static int acpi_hibernation_enter(void)
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }

+static void acpi_hibernation_finish(void)
+{
+	hibernate_nvs_free();
+	acpi_pm_finish();
+}
+
 static void acpi_hibernation_leave(void)
 {
 	/*
...
@@ -432,6 +467,8 @@ static void acpi_hibernation_leave(void)
 		       "cannot resume!\n");
 		panic("ACPI S4 hardware signature mismatch");
 	}
+	/* Restore the NVS memory area */
+	hibernate_nvs_restore();
 }

 static void acpi_pm_enable_gpes(void)
...
@@ -442,8 +479,8 @@ static void acpi_pm_enable_gpes(void)
 static struct platform_hibernation_ops acpi_hibernation_ops = {
 	.begin = acpi_hibernation_begin,
 	.end = acpi_pm_end,
-	.pre_snapshot = acpi_pm_prepare,
-	.finish = acpi_pm_finish,
+	.pre_snapshot = acpi_hibernation_pre_snapshot,
+	.finish = acpi_hibernation_finish,
 	.prepare = acpi_pm_prepare,
 	.enter = acpi_hibernation_enter,
 	.leave = acpi_hibernation_leave,
...
@@ -469,8 +506,22 @@ static int acpi_hibernation_begin_old(void)
 	error = acpi_sleep_prepare(ACPI_STATE_S4);

+	if (!error) {
+		if (!s4_no_nvs)
+			error = hibernate_nvs_alloc();
+		if (!error)
+			acpi_target_sleep_state = ACPI_STATE_S4;
+	}
+
+	return error;
+}
+
+static int acpi_hibernation_pre_snapshot_old(void)
+{
+	int error = acpi_pm_disable_gpes();
+
 	if (!error)
-		acpi_target_sleep_state = ACPI_STATE_S4;
+		hibernate_nvs_save();

 	return error;
 }
...
@@ -481,8 +532,8 @@ static int acpi_hibernation_begin_old(void)
 static struct platform_hibernation_ops acpi_hibernation_ops_old = {
 	.begin = acpi_hibernation_begin_old,
 	.end = acpi_pm_end,
-	.pre_snapshot = acpi_pm_disable_gpes,
-	.finish = acpi_pm_finish,
+	.pre_snapshot = acpi_hibernation_pre_snapshot_old,
+	.finish = acpi_hibernation_finish,
 	.prepare = acpi_pm_disable_gpes,
 	.enter = acpi_hibernation_enter,
 	.leave = acpi_hibernation_leave,
...
...
include/linux/acpi.h

@@ -270,6 +270,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 #ifdef CONFIG_PM_SLEEP
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
+void __init acpi_s4_no_nvs(void);
 #endif /* CONFIG_PM_SLEEP */

 #else	/* CONFIG_ACPI */
...
...
include/linux/suspend.h

@@ -232,6 +232,11 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);

 extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
 extern int hibernate(void);
+extern int hibernate_nvs_register(unsigned long start, unsigned long size);
+extern int hibernate_nvs_alloc(void);
+extern void hibernate_nvs_free(void);
+extern void hibernate_nvs_save(void);
+extern void hibernate_nvs_restore(void);
 #else /* CONFIG_HIBERNATION */
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
...
@@ -239,6 +244,14 @@ static inline void swsusp_unset_page_free(struct page *p) {}

 static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
+static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+static inline int hibernate_nvs_alloc(void) { return 0; }
+static inline void hibernate_nvs_free(void) {}
+static inline void hibernate_nvs_save(void) {}
+static inline void hibernate_nvs_restore(void) {}
 #endif /* CONFIG_HIBERNATION */

 #ifdef CONFIG_PM_SLEEP
...
...
kernel/power/disk.c

@@ -259,12 +259,12 @@ int hibernation_snapshot(int platform_mode)
 {
 	int error, ftrace_save;

-	/* Free memory before shutting down devices. */
-	error = swsusp_shrink_memory();
+	error = platform_begin(platform_mode);
 	if (error)
 		return error;

-	error = platform_begin(platform_mode);
+	/* Free memory before shutting down devices. */
+	error = swsusp_shrink_memory();
 	if (error)
 		goto Close;
...
...
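Swapping the two calls means platform_begin() now runs before swsusp_shrink_memory(); presumably this is so that memory allocated by the platform callback (with the new ACPI code above, the NVS backup pages) is already accounted for when the image size is being trimmed.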
kernel/power/snapshot.c

@@ -25,6 +25,7 @@
 #include <linux/syscalls.h>
 #include <linux/console.h>
 #include <linux/highmem.h>
+#include <linux/list.h>

 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
...
...
@@ -192,12 +193,6 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 	return ret;
 }

-static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
-{
-	free_list_of_pages(ca->chain, clear_page_nosave);
-	memset(ca, 0, sizeof(struct chain_allocator));
-}
-
 /**
  *	Data types related to memory bitmaps.
  *
...
...
@@ -233,7 +228,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
 #define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)

 struct bm_block {
-	struct bm_block *next;		/* next element of the list */
+	struct list_head hook;		/* hook into a list of bitmap blocks */
 	unsigned long start_pfn;	/* pfn represented by the first bit */
 	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
 	unsigned long *data;	/* bitmap representing pages */
...
...
@@ -244,24 +239,15 @@ static inline unsigned long bm_block_bits(struct bm_block *bb)
 	return bb->end_pfn - bb->start_pfn;
 }

-struct zone_bitmap {
-	struct zone_bitmap *next;	/* next element of the list */
-	unsigned long start_pfn;	/* minimal pfn in this zone */
-	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
-	struct bm_block *bm_blocks;	/* list of bitmap blocks */
-	struct bm_block *cur_block;	/* recently used bitmap block */
-};
-
 /* strcut bm_position is used for browsing memory bitmaps */

 struct bm_position {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *block;
 	int bit;
 };

 struct memory_bitmap {
-	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
+	struct list_head blocks;	/* list of bitmap blocks */
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
...
...
@@ -273,11 +259,7 @@ struct memory_bitmap {

 static void memory_bm_position_reset(struct memory_bitmap *bm)
 {
-	struct zone_bitmap *zone_bm;
-
-	zone_bm = bm->zone_bm_list;
-	bm->cur.zone_bm = zone_bm;
-	bm->cur.block = zone_bm->bm_blocks;
+	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
 	bm->cur.bit = 0;
 }
...
...
@@ -285,151 +267,184 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

 /**
  *	create_bm_block_list - create a list of block bitmap objects
+ *	@nr_blocks - number of blocks to allocate
+ *	@list - list to put the allocated blocks into
+ *	@ca - chain allocator to be used for allocating memory
  */
-static inline struct bm_block *
-create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
+static int create_bm_block_list(unsigned long pages,
+				struct list_head *list,
+				struct chain_allocator *ca)
 {
-	struct bm_block *bblist = NULL;
+	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

 	while (nr_blocks-- > 0) {
 		struct bm_block *bb;

 		bb = chain_alloc(ca, sizeof(struct bm_block));
 		if (!bb)
-			return NULL;
-
-		bb->next = bblist;
-		bblist = bb;
+			return -ENOMEM;
+		list_add(&bb->hook, list);
 	}
-	return bblist;
+
+	return 0;
 }

+struct mem_extent {
+	struct list_head hook;
+	unsigned long start;
+	unsigned long end;
+};
+
 /**
- *	create_zone_bm_list - create a list of zone bitmap objects
+ *	free_mem_extents - free a list of memory extents
+ *	@list - list of extents to empty
  */
-static inline struct zone_bitmap *
-create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
+static void free_mem_extents(struct list_head *list)
 {
-	struct zone_bitmap *zbmlist = NULL;
+	struct mem_extent *ext, *aux;

-	while (nr_zones-- > 0) {
-		struct zone_bitmap *zbm;
+	list_for_each_entry_safe(ext, aux, list, hook) {
+		list_del(&ext->hook);
+		kfree(ext);
+	}
+}

-		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
-		if (!zbm)
-			return NULL;
+/**
+ *	create_mem_extents - create a list of memory extents representing
+ *	                     contiguous ranges of PFNs
+ *	@list - list to put the extents into
+ *	@gfp_mask - mask to use for memory allocations
+ */
+static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
+{
+	struct zone *zone;

-		zbm->next = zbmlist;
-		zbmlist = zbm;
+	INIT_LIST_HEAD(list);
+
+	for_each_zone(zone) {
+		unsigned long zone_start, zone_end;
+		struct mem_extent *ext, *cur, *aux;
+
+		if (!populated_zone(zone))
+			continue;
+
+		zone_start = zone->zone_start_pfn;
+		zone_end = zone->zone_start_pfn + zone->spanned_pages;
+
+		list_for_each_entry(ext, list, hook)
+			if (zone_start <= ext->end)
+				break;
+
+		if (&ext->hook == list || zone_end < ext->start) {
+			/* New extent is necessary */
+			struct mem_extent *new_ext;
+
+			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
+			if (!new_ext) {
+				free_mem_extents(list);
+				return -ENOMEM;
+			}
+			new_ext->start = zone_start;
+			new_ext->end = zone_end;
+			list_add_tail(&new_ext->hook, &ext->hook);
+			continue;
+		}
+
+		/* Merge this zone's range of PFNs with the existing one */
+		if (zone_start < ext->start)
+			ext->start = zone_start;
+		if (zone_end > ext->end)
+			ext->end = zone_end;
+
+		/* More merging may be possible */
+		cur = ext;
+		list_for_each_entry_safe_continue(cur, aux, list, hook) {
+			if (zone_end < cur->start)
+				break;
+			if (zone_end < cur->end)
+				ext->end = cur->end;
+			list_del(&cur->hook);
+			kfree(cur);
+		}
 	}
-	return zbmlist;
+
+	return 0;
 }

 /**
  *	memory_bm_create - allocate memory for a memory bitmap
  */
 static int
 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 {
 	struct chain_allocator ca;
-	struct zone *zone;
-	struct zone_bitmap *zone_bm;
-	struct bm_block *bb;
-	unsigned int nr;
+	struct list_head mem_extents;
+	struct mem_extent *ext;
+	int error;

 	chain_init(&ca, gfp_mask, safe_needed);
+	INIT_LIST_HEAD(&bm->blocks);

-	/* Compute the number of zones */
-	nr = 0;
-	for_each_zone(zone)
-		if (populated_zone(zone))
-			nr++;
-
-	/* Allocate the list of zones bitmap objects */
-	zone_bm = create_zone_bm_list(nr, &ca);
-	bm->zone_bm_list = zone_bm;
-	if (!zone_bm) {
-		chain_free(&ca, PG_UNSAFE_CLEAR);
-		return -ENOMEM;
-	}
-
-	/* Initialize the zone bitmap objects */
-	for_each_zone(zone) {
-		unsigned long pfn;
+	error = create_mem_extents(&mem_extents, gfp_mask);
+	if (error)
+		return error;

-		if (!populated_zone(zone))
-			continue;
+	list_for_each_entry(ext, &mem_extents, hook) {
+		struct bm_block *bb;
+		unsigned long pfn = ext->start;
+		unsigned long pages = ext->end - ext->start;

-		zone_bm->start_pfn = zone->zone_start_pfn;
-		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-		/* Allocate the list of bitmap block objects */
-		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-		bb = create_bm_block_list(nr, &ca);
-		zone_bm->bm_blocks = bb;
-		zone_bm->cur_block = bb;
-		if (!bb)
-			goto Free;
+		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

-		nr = zone->spanned_pages;
-		pfn = zone->zone_start_pfn;
-		/* Initialize the bitmap block objects */
-		while (bb) {
-			unsigned long *ptr;
+		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
+		if (error)
+			goto Error;

-			ptr = get_image_page(gfp_mask, safe_needed);
-			bb->data = ptr;
-			if (!ptr)
-				goto Free;
+		list_for_each_entry_continue(bb, &bm->blocks, hook) {
+			bb->data = get_image_page(gfp_mask, safe_needed);
+			if (!bb->data) {
+				error = -ENOMEM;
+				goto Error;
+			}

 			bb->start_pfn = pfn;
-			if (nr >= BM_BITS_PER_BLOCK) {
+			if (pages >= BM_BITS_PER_BLOCK) {
 				pfn += BM_BITS_PER_BLOCK;
-				nr -= BM_BITS_PER_BLOCK;
+				pages -= BM_BITS_PER_BLOCK;
 			} else {
 				/* This is executed only once in the loop */
-				pfn += nr;
+				pfn += pages;
 			}
 			bb->end_pfn = pfn;
-			bb = bb->next;
 		}
-		zone_bm = zone_bm->next;
 	}
+
 	bm->p_list = ca.chain;
 	memory_bm_position_reset(bm);
-	return 0;
+ Exit:
+	free_mem_extents(&mem_extents);
+	return error;

- Free:
+ Error:
 	bm->p_list = ca.chain;
 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
-	return -ENOMEM;
+	goto Exit;
 }

 /**
  *	memory_bm_free - free memory occupied by the memory bitmap @bm
  */
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
-	struct zone_bitmap *zone_bm;
+	struct bm_block *bb;

-	/* Free the list of bit blocks for each zone_bitmap object */
-	zone_bm = bm->zone_bm_list;
-	while (zone_bm) {
-		struct bm_block *bb;
+	list_for_each_entry(bb, &bm->blocks, hook)
+		if (bb->data)
+			free_image_page(bb->data, clear_nosave_free);

-		bb = zone_bm->bm_blocks;
-		while (bb) {
-			if (bb->data)
-				free_image_page(bb->data, clear_nosave_free);
-			bb = bb->next;
-		}
-		zone_bm = zone_bm->next;
-	}
 	free_list_of_pages(bm->p_list, clear_nosave_free);
-	bm->zone_bm_list = NULL;
+
+	INIT_LIST_HEAD(&bm->blocks);
 }
...
...
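The create_mem_extents() logic added above merges each zone's PFN range into a sorted list of disjoint extents, so zones that overlap each other are only tracked once. A standalone sketch of the same merging idea, using an array instead of a kernel list (illustrative only; the PFN values are made up):

#include <stdio.h>

#define MAX_EXT 8

struct ext { unsigned long start, end; };

static int nr_ext;
static struct ext exts[MAX_EXT];

/* Insert [start, end) into a sorted list of disjoint extents, merging
 * wherever the new range touches or overlaps an existing one. */
static void add_range(unsigned long start, unsigned long end)
{
	int i = 0, j;

	/* find the first extent that the new range could merge with */
	while (i < nr_ext && start > exts[i].end)
		i++;

	if (i == nr_ext || end < exts[i].start) {
		/* no overlap: insert a new extent at position i */
		for (j = nr_ext; j > i; j--)
			exts[j] = exts[j - 1];
		exts[i].start = start;
		exts[i].end = end;
		nr_ext++;
		return;
	}

	/* overlap: widen extent i, then swallow any later extents covered */
	if (start < exts[i].start)
		exts[i].start = start;
	if (end > exts[i].end)
		exts[i].end = end;
	while (i + 1 < nr_ext && exts[i].end >= exts[i + 1].start) {
		if (exts[i + 1].end > exts[i].end)
			exts[i].end = exts[i + 1].end;
		for (j = i + 1; j < nr_ext - 1; j++)
			exts[j] = exts[j + 1];
		nr_ext--;
	}
}

int main(void)
{
	int i;

	/* hypothetical zones whose PFN ranges interleave */
	add_range(0, 4096);
	add_range(65536, 131072);
	add_range(2048, 70000);

	for (i = 0; i < nr_ext; i++)
		printf("extent [%lu, %lu)\n", exts[i].start, exts[i].end);
	return 0;		/* prints a single extent [0, 131072) */
}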
@@ -437,38 +452,33 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
  *	to given pfn. The cur_zone_bm member of @bm and the cur_block member
  *	of @bm->cur_zone_bm are updated.
  */
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 				void **addr, unsigned int *bit_nr)
 {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;

-	/* Check if the pfn is from the current zone */
-	zone_bm = bm->cur.zone_bm;
-	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
-		zone_bm = bm->zone_bm_list;
-		/* We don't assume that the zones are sorted by pfns */
-		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
-			zone_bm = zone_bm->next;
-
-			if (!zone_bm)
-				return -EFAULT;
-		}
-		bm->cur.zone_bm = zone_bm;
-	}
-	/* Check if the pfn corresponds to the current bitmap block */
-	bb = zone_bm->cur_block;
+	/*
+	 * Check if the pfn corresponds to the current bitmap block and find
+	 * the block where it fits if this is not the case.
+	 */
+	bb = bm->cur.block;
 	if (pfn < bb->start_pfn)
-		bb = zone_bm->bm_blocks;
+		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
+			if (pfn >= bb->start_pfn)
+				break;

-	while (pfn >= bb->end_pfn) {
-		bb = bb->next;
+	if (pfn >= bb->end_pfn)
+		list_for_each_entry_continue(bb, &bm->blocks, hook)
+			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
+				break;

-		BUG_ON(!bb);
-	}
-	zone_bm->cur_block = bb;
+	if (&bb->hook == &bm->blocks)
+		return -EFAULT;
+
+	/* The block has been found */
+	bm->cur.block = bb;
 	pfn -= bb->start_pfn;
+	bm->cur.bit = pfn + 1;
 	*bit_nr = pfn;
 	*addr = bb->data;
+
 	return 0;
...
...
@@ -519,6 +529,14 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 	return test_bit(bit, addr);
 }

+static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
+{
+	void *addr;
+	unsigned int bit;
+
+	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
+}
+
 /**
  *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
  *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
...
...
@@ -530,11 +548,9 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;
 	int bit;

+	bb = bm->cur.block;
 	do {
-		bb = bm->cur.block;
-		do {
-			bit = bm->cur.bit;
+		bit = bm->cur.bit;
...
@@ -542,17 +558,11 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 		if (bit < bm_block_bits(bb))
 			goto Return_pfn;

-			bb = bb->next;
+		bb = list_entry(bb->hook.next, struct bm_block, hook);
 		bm->cur.block = bb;
 		bm->cur.bit = 0;
-		} while (bb);
-		zone_bm = bm->cur.zone_bm->next;
-		if (zone_bm) {
-			bm->cur.zone_bm = zone_bm;
-			bm->cur.block = zone_bm->bm_blocks;
-			bm->cur.bit = 0;
-		}
-	} while (zone_bm);
+	} while (&bb->hook != &bm->blocks);

 	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
...
...
@@ -808,8 +818,7 @@ static unsigned int count_free_highmem_pages(void)
  *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
  *	and it isn't a part of a free chunk of pages.
  */
-static struct page *saveable_highmem_page(unsigned long pfn)
+static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 {
 	struct page *page;
...
@@ -817,6 +826,8 @@ static struct page *saveable_highmem_page(unsigned long pfn)
 		return NULL;

 	page = pfn_to_page(pfn);
+	if (page_zone(page) != zone)
+		return NULL;

 	BUG_ON(!PageHighMem(page));
...
@@ -846,13 +857,16 @@ unsigned int count_highmem_pages(void)
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (saveable_highmem_page(pfn))
+			if (saveable_highmem_page(zone, pfn))
 				n++;
 	}
 	return n;
 }
 #else
-static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
+{
+	return NULL;
+}
 #endif /* CONFIG_HIGHMEM */

 /**
...
...
@@ -863,8 +877,7 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
  *	of pages statically defined as 'unsaveable', and it isn't a part of
  *	a free chunk of pages.
  */
-static struct page *saveable_page(unsigned long pfn)
+static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 {
 	struct page *page;
...
@@ -872,6 +885,8 @@ static struct page *saveable_page(unsigned long pfn)
 		return NULL;

 	page = pfn_to_page(pfn);
+	if (page_zone(page) != zone)
+		return NULL;

 	BUG_ON(PageHighMem(page));
...
@@ -903,7 +918,7 @@ unsigned int count_data_pages(void)
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (saveable_page(pfn))
+			if (saveable_page(zone, pfn))
 				n++;
 	}
 	return n;
...
...
@@ -944,7 +959,7 @@ static inline struct page *
 page_is_saveable(struct zone *zone, unsigned long pfn)
 {
 	return is_highmem(zone) ?
-			saveable_highmem_page(pfn) : saveable_page(pfn);
+		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
 }

 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
...
@@ -966,7 +981,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 		 * data modified by kmap_atomic()
 		 */
 		safe_copy_page(buffer, s_page);
-		dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
+		dst = kmap_atomic(d_page, KM_USER0);
 		memcpy(dst, buffer, PAGE_SIZE);
 		kunmap_atomic(dst, KM_USER0);
 	} else {
...
@@ -975,7 +990,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 	}
 }
 #else
-#define page_is_saveable(zone, pfn)	saveable_page(pfn)
+#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 {
...
...
@@ -1459,9 +1474,7 @@ load_header(struct swsusp_info *info)
  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
  *	the corresponding bit in the memory bitmap @bm
  */
-static inline void
-unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 {
 	int j;
...
@@ -1469,8 +1482,13 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;

-		memory_bm_set_bit(bm, buf[j]);
+		if (memory_bm_pfn_present(bm, buf[j]))
+			memory_bm_set_bit(bm, buf[j]);
+		else
+			return -EFAULT;
 	}
+
+	return 0;
 }

 /* List of "safe" pages that may be used to store data loaded from the suspend
...
...
@@ -1608,7 +1626,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
 	if (!pbe) {
 		swsusp_free();
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	pbe->orig_page = page;
 	if (safe_highmem_pages > 0) {
...
@@ -1677,7 +1695,7 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
 static inline void *get_highmem_page_buffer(struct page *page,
 		struct chain_allocator *ca)
 {
-	return NULL;
+	return ERR_PTR(-EINVAL);
 }

 static inline void copy_last_highmem_page(void) {}
...
...
@@ -1788,8 +1806,13 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 {
 	struct pbe *pbe;
-	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
+	struct page *page;
+	unsigned long pfn = memory_bm_next_pfn(bm);

+	if (pfn == BM_END_OF_MAP)
+		return ERR_PTR(-EFAULT);
+
+	page = pfn_to_page(pfn);
 	if (PageHighMem(page))
 		return get_highmem_page_buffer(page, ca);
...
@@ -1805,7 +1828,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 	pbe = chain_alloc(ca, sizeof(struct pbe));
 	if (!pbe) {
 		swsusp_free();
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	pbe->orig_address = page_address(page);
 	pbe->address = safe_pages_list;
...
...
@@ -1868,7 +1891,10 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
 			return error;

 	} else if (handle->prev <= nr_meta_pages) {
-		unpack_orig_pfns(buffer, &copy_bm);
+		error = unpack_orig_pfns(buffer, &copy_bm);
+		if (error)
+			return error;
+
 		if (handle->prev == nr_meta_pages) {
 			error = prepare_image(&orig_bm, &copy_bm);
 			if (error)
...
@@ -1879,12 +1905,14 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
 			restore_pblist = NULL;
 			handle->buffer = get_buffer(&orig_bm, &ca);
 			handle->sync_read = 0;
-			if (!handle->buffer)
-				return -ENOMEM;
+			if (IS_ERR(handle->buffer))
+				return PTR_ERR(handle->buffer);
 		}
 	} else {
 		copy_last_highmem_page();
 		handle->buffer = get_buffer(&orig_bm, &ca);
+		if (IS_ERR(handle->buffer))
+			return PTR_ERR(handle->buffer);
 		if (handle->buffer != buffer)
 			handle->sync_read = 0;
 	}
...
...
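get_buffer() and its callers now use the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, so a failing lookup can report which error occurred instead of a bare NULL. A standalone user-space sketch of that convention (a simplified re-implementation of the real kernel macros, for illustration only):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Encode a negative errno value inside a pointer. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno value from an error pointer. */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* Error pointers live in the top 4095 values of the address space. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for get_buffer(): returns ERR_PTR(-ENOMEM)
 * on failure instead of NULL. */
static void *get_buffer_stub(int fail)
{
	static char page[4096];

	if (fail)
		return ERR_PTR(-ENOMEM);
	return page;
}

int main(void)
{
	void *buf = get_buffer_stub(1);

	if (IS_ERR(buf))
		printf("get_buffer failed: %ld\n", PTR_ERR(buf));
	return 0;
}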
kernel/power/swsusp.c

@@ -262,3 +262,125 @@ int swsusp_shrink_memory(void)

 	return 0;
 }
+
+/*
+ * Platforms, like ACPI, may want us to save some memory used by them during
+ * hibernation and to restore the contents of this memory during the subsequent
+ * resume.  The code below implements a mechanism allowing us to do that.
+ */
+
+struct nvs_page {
+	unsigned long phys_start;
+	unsigned int size;
+	void *kaddr;
+	void *data;
+	struct list_head node;
+};
+
+static LIST_HEAD(nvs_list);
+
+/**
+ *	hibernate_nvs_register - register platform NVS memory region to save
+ *	@start - physical address of the region
+ *	@size - size of the region
+ *
+ *	The NVS region need not be page-aligned (both ends) and we arrange
+ *	things so that the data from page-aligned addresses in this region will
+ *	be copied into separate RAM pages.
+ */
+int hibernate_nvs_register(unsigned long start, unsigned long size)
+{
+	struct nvs_page *entry, *next;
+
+	while (size > 0) {
+		unsigned int nr_bytes;
+
+		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
+		if (!entry)
+			goto Error;
+
+		list_add_tail(&entry->node, &nvs_list);
+		entry->phys_start = start;
+		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
+		entry->size = (size < nr_bytes) ? size : nr_bytes;
+
+		start += entry->size;
+		size -= entry->size;
+	}
+	return 0;
+
+ Error:
+	list_for_each_entry_safe(entry, next, &nvs_list, node) {
+		list_del(&entry->node);
+		kfree(entry);
+	}
+	return -ENOMEM;
+}
+
+/**
+ *	hibernate_nvs_free - free data pages allocated for saving NVS regions
+ */
+void hibernate_nvs_free(void)
+{
+	struct nvs_page *entry;
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data) {
+			free_page((unsigned long)entry->data);
+			entry->data = NULL;
+			if (entry->kaddr) {
+				iounmap(entry->kaddr);
+				entry->kaddr = NULL;
+			}
+		}
+}
+
+/**
+ *	hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ */
+int hibernate_nvs_alloc(void)
+{
+	struct nvs_page *entry;
+
+	list_for_each_entry(entry, &nvs_list, node) {
+		entry->data = (void *)__get_free_page(GFP_KERNEL);
+		if (!entry->data) {
+			hibernate_nvs_free();
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
+ *	hibernate_nvs_save - save NVS memory regions
+ */
+void hibernate_nvs_save(void)
+{
+	struct nvs_page *entry;
+
+	printk(KERN_INFO "PM: Saving platform NVS memory\n");
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data) {
+			entry->kaddr = ioremap(entry->phys_start, entry->size);
+			memcpy(entry->data, entry->kaddr, entry->size);
+		}
+}
+
+/**
+ *	hibernate_nvs_restore - restore NVS memory regions
+ *
+ *	This function is going to be called with interrupts disabled, so it
+ *	cannot iounmap the virtual addresses used to access the NVS region.
+ */
+void hibernate_nvs_restore(void)
+{
+	struct nvs_page *entry;
+
+	printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data)
+			memcpy(entry->kaddr, entry->data, entry->size);
+}
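The splitting loop in hibernate_nvs_register() above chops a region that is not page-aligned at either end into chunks that each stay within a single page. A standalone sketch of the same arithmetic with a 4 KiB page size (the region address and size are made-up values):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical NVS region: starts mid-page, spans ~2.5 pages */
	unsigned long start = 0x9f400, size = 0x2800;

	while (size > 0) {
		/* bytes left in the page that contains 'start' */
		unsigned long nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
		unsigned long chunk = (size < nr_bytes) ? size : nr_bytes;

		printf("chunk: phys 0x%lx, %lu bytes\n", start, chunk);
		start += chunk;
		size -= chunk;
	}
	return 0;	/* prints 3072-, 4096- and 3072-byte chunks */
}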