Commit f6cf0545 authored by James Morse, committed by Will Deacon

PM / Hibernate: Call flush_icache_range() on pages restored in-place

Some architectures require code written to memory as if it were data to be
'cleaned' from any data caches before the processor can fetch them as new
instructions.

During resume from hibernate, the snapshot code copies some pages directly,
meaning these architectures do not get a chance to perform their cache
maintenance. Modify the read and decompress code to call
flush_icache_range() on all pages that are restored, so that the restored
in-place pages are guaranteed to be executable on these architectures.
Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Acked-by: Rafael J. Wysocki <rjw@rjwysocki.net>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
[will: make clean_pages_on_* static and remove initialisers]
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 5003dbde
...@@ -36,6 +36,14 @@ ...@@ -36,6 +36,14 @@
#define HIBERNATE_SIG "S1SUSPEND" #define HIBERNATE_SIG "S1SUSPEND"
/*
* When reading an {un,}compressed image, we may restore pages in place,
* in which case some architectures need these pages cleaning before they
* can be executed. We don't know which pages these may be, so clean the lot.
*/
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/* /*
* The swap map is a data structure used for keeping track of each page * The swap map is a data structure used for keeping track of each page
* written to a swap partition. It consists of many swap_map_page * written to a swap partition. It consists of many swap_map_page
...@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio) ...@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
if (bio_data_dir(bio) == WRITE) if (bio_data_dir(bio) == WRITE)
put_page(page); put_page(page);
else if (clean_pages_on_read)
flush_icache_range((unsigned long)page_address(page),
(unsigned long)page_address(page) + PAGE_SIZE);
if (bio->bi_error && !hb->error) if (bio->bi_error && !hb->error)
hb->error = bio->bi_error; hb->error = bio->bi_error;
...@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle, ...@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
hib_init_batch(&hb); hib_init_batch(&hb);
clean_pages_on_read = true;
printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
nr_to_read); nr_to_read);
m = nr_to_read / 10; m = nr_to_read / 10;
...@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data) ...@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
d->unc_len = LZO_UNC_SIZE; d->unc_len = LZO_UNC_SIZE;
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
d->unc, &d->unc_len); d->unc, &d->unc_len);
if (clean_pages_on_decompress)
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);
atomic_set(&d->stop, 1); atomic_set(&d->stop, 1);
wake_up(&d->done); wake_up(&d->done);
} }
...@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle, ...@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
} }
memset(crc, 0, offsetof(struct crc_data, go)); memset(crc, 0, offsetof(struct crc_data, go));
clean_pages_on_decompress = true;
/* /*
* Start the decompression threads. * Start the decompression threads.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment