Commit 381e63da authored by Rafael J. Wysocki

Merge branches 'pm-cpufreq' and 'pm-sleep'

* pm-cpufreq:
  cpufreq: release policy->rwsem on error
  cpufreq: fix cpufreq suspend/resume for intel_pstate

* pm-sleep:
  Revert "PM / Hibernate: Iterate over set bits instead of PFNs in swsusp_free()"
drivers/cpufreq/cpufreq.c
@@ -1289,6 +1289,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	up_write(&policy->rwsem);
+
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
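The first pm-cpufreq change fixes an error path in __cpufreq_add_dev() that returned with policy->rwsem still held for writing: the lock is now released before the driver's ->exit() callback and the rest of the unwinding run. The sketch below is illustrative only, a minimal userspace analogue of the pattern (release the write lock on every error path before further teardown) using pthreads and hypothetical names, not kernel code.

#include <pthread.h>
#include <stdio.h>

struct policy {
	pthread_rwlock_t rwsem;
	int initialized;
};

/* Hypothetical stand-in for a driver init callback that fails. */
static int driver_init(struct policy *p)
{
	(void)p;
	return -1;
}

static int add_policy(struct policy *p)
{
	int ret;

	pthread_rwlock_wrlock(&p->rwsem);

	ret = driver_init(p);
	if (ret) {
		/*
		 * Drop the write lock before any further teardown,
		 * mirroring the up_write() added to the error path above.
		 */
		pthread_rwlock_unlock(&p->rwsem);
		goto err;
	}

	p->initialized = 1;
	pthread_rwlock_unlock(&p->rwsem);
	return 0;

err:
	/* ->exit()-style cleanup would run here, with the lock dropped. */
	return ret;
}

int main(void)
{
	struct policy p = { .initialized = 0 };

	pthread_rwlock_init(&p.rwsem, NULL);
	printf("add_policy() = %d\n", add_policy(&p));
	pthread_rwlock_destroy(&p.rwsem);
	return 0;
}

Without the unlock on the failure branch, the next writer or reader of the same rwsem would block forever, which is the bug the one-line up_write() addresses.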
@@ -1656,6 +1658,8 @@ void cpufreq_suspend(void)
 	if (!cpufreq_driver)
 		return;
 
+	cpufreq_suspended = true;
+
 	if (!has_target())
 		return;
@@ -1670,8 +1674,6 @@ void cpufreq_suspend(void)
 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
 				policy);
 	}
-
-	cpufreq_suspended = true;
 }
 
 /**
@@ -1687,13 +1689,13 @@ void cpufreq_resume(void)
 	if (!cpufreq_driver)
 		return;
 
+	cpufreq_suspended = false;
+
 	if (!has_target())
 		return;
 
 	pr_debug("%s: Resuming Governors\n", __func__);
 
-	cpufreq_suspended = false;
-
 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
......
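The remaining pm-cpufreq hunks move the cpufreq_suspended assignments in cpufreq_suspend() and cpufreq_resume() ahead of the has_target() early return, so the flag is updated even when has_target() is false; the commit subject points at intel_pstate, which provides ->setpolicy rather than ->target. A minimal userspace sketch of the ordering issue follows; names and structure are hypothetical, illustrating the pattern rather than the kernel functions.

#include <stdbool.h>
#include <stdio.h>

static bool suspended;
static bool driver_has_target;

/* Fixed ordering: the flag is set before the early return. */
static void suspend_fixed(void)
{
	suspended = true;

	if (!driver_has_target)
		return;		/* nothing else to do for this driver */

	/* ... stop governors, call the driver's suspend hook ... */
}

/* Broken ordering: the early return skips the flag entirely. */
static void suspend_broken(void)
{
	if (!driver_has_target)
		return;

	/* ... stop governors ... */

	suspended = true;	/* never reached without a governor target */
}

int main(void)
{
	driver_has_target = false;	/* e.g. a setpolicy-style driver */

	suspend_broken();
	printf("broken: suspended=%d\n", suspended);

	suspended = false;
	suspend_fixed();
	printf("fixed:  suspended=%d\n", suspended);
	return 0;
}

Any code that later consults the flag to decide whether to skip work during system suspend/resume behaves correctly only if the flag is set unconditionally, which is what the reordering guarantees.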
kernel/power/snapshot.c
@@ -725,14 +725,6 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	clear_bit(bit, addr);
 }
 
-static void memory_bm_clear_current(struct memory_bitmap *bm)
-{
-	int bit;
-
-	bit = max(bm->cur.node_bit - 1, 0);
-	clear_bit(bit, bm->cur.node->data);
-}
-
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
@@ -1341,35 +1333,23 @@ static struct memory_bitmap copy_bm;
 void swsusp_free(void)
 {
-	unsigned long fb_pfn, fr_pfn;
-
-	memory_bm_position_reset(forbidden_pages_map);
-	memory_bm_position_reset(free_pages_map);
-
-loop:
-	fr_pfn = memory_bm_next_pfn(free_pages_map);
-	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
-
-	/*
-	 * Find the next bit set in both bitmaps. This is guaranteed to
-	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
-	 */
-	do {
-		if (fb_pfn < fr_pfn)
-			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
-		if (fr_pfn < fb_pfn)
-			fr_pfn = memory_bm_next_pfn(free_pages_map);
-	} while (fb_pfn != fr_pfn);
-
-	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
-		struct page *page = pfn_to_page(fr_pfn);
+	struct zone *zone;
+	unsigned long pfn, max_zone_pfn;
 
-		memory_bm_clear_current(forbidden_pages_map);
-		memory_bm_clear_current(free_pages_map);
-		__free_page(page);
-		goto loop;
+	for_each_populated_zone(zone) {
+		max_zone_pfn = zone_end_pfn(zone);
+		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+
+				if (swsusp_page_is_forbidden(page) &&
+				    swsusp_page_is_free(page)) {
+					swsusp_unset_page_forbidden(page);
+					swsusp_unset_page_free(page);
+					__free_page(page);
+				}
+			}
 	}
 
 	nr_copy_pages = 0;
 	nr_meta_pages = 0;
 	restore_pblist = NULL;
......
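The pm-sleep change reverts the swsusp_free() rework: the memory_bm_clear_current() helper and the walk over bits set in both the forbidden and free bitmaps are dropped, and the original loop that scans every valid PFN of each populated zone and frees pages marked both forbidden and free is restored. Below is a tiny userspace model of the restored strategy; the arrays and the freed counter are hypothetical stand-ins for the page flags and __free_page(), meant only to illustrate the scan-and-clear pattern.

#include <stdbool.h>
#include <stdio.h>

#define NR_PFNS 16

static bool forbidden[NR_PFNS];	/* stand-in for swsusp_page_is_forbidden() */
static bool freeable[NR_PFNS];	/* stand-in for swsusp_page_is_free() */

/* Restored approach: visit every PFN, test both marks, clear them, free. */
static int free_marked_pages(void)
{
	int pfn, freed = 0;

	for (pfn = 0; pfn < NR_PFNS; pfn++) {
		if (forbidden[pfn] && freeable[pfn]) {
			forbidden[pfn] = false;
			freeable[pfn] = false;
			freed++;	/* stands in for __free_page() */
		}
	}
	return freed;
}

int main(void)
{
	forbidden[3] = freeable[3] = true;
	forbidden[9] = freeable[9] = true;
	freeable[12] = true;	/* marked free but not forbidden: left alone */

	printf("freed %d pages\n", free_marked_pages());
	return 0;
}

The reverted optimization walked only the set bits of the two bitmaps instead of every page frame; the revert trades that speed back for the simpler full scan shown in the hunk above.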