Commit f7635583 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86_64: change_page_attr logic fixes from Andrea

change_page_attr logic fixes from Andrea

This avoids reference counting leaks and adds BUGs for more wrong cases.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7db37701
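
Both hunks below adjust the same bookkeeping: each pte in a kernel-mapping page-table page that has been switched away from the default protection is meant to pin that page-table page with get_page(), and restoring the default protection drops the pin with __put_page(); once the count returns to its baseline, every pte is standard again and the page-table page can be folded back into a single large mapping. The stand-alone user-space sketch below only models that invariant; the struct, the helper names and the baseline count of 1 are illustrative assumptions, not kernel code.

/*
 * Toy model of the reference counting change_page_attr() does on a
 * page-table page (kpte_page).  Illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PTES_PER_PAGE 512

struct kpte_page_model {
	int count;                        /* stands in for page_count(kpte_page) */
	bool nonstandard[PTES_PER_PAGE];  /* pte differs from the default prot?  */
};

/* "prot != PAGE_KERNEL" path: take one reference per pte switched away
 * from the standard protection. */
static void change_attr(struct kpte_page_model *p, int idx)
{
	if (!p->nonstandard[idx]) {
		p->nonstandard[idx] = true;
		p->count++;               /* get_page(kpte_page) */
	}
}

/* "prot == PAGE_KERNEL" path: drop that reference again. */
static void restore_attr(struct kpte_page_model *p, int idx)
{
	if (p->nonstandard[idx]) {
		p->nonstandard[idx] = false;
		p->count--;               /* __put_page(kpte_page) */
	}
	assert(p->count != 0);            /* a zero count means an extra/unbalanced drop */
	if (p->count == 1)                /* only the baseline reference is left... */
		printf("all ptes standard: revert to large page\n");
}

int main(void)
{
	struct kpte_page_model p = { .count = 1 };  /* baseline reference */

	change_attr(&p, 7);
	change_attr(&p, 9);
	restore_attr(&p, 7);
	restore_attr(&p, 9);              /* count drops back to 1 -> revert */
	return 0;
}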
@@ -120,27 +120,35 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	kpte_page = virt_to_page(kpte);
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
 		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
-			pte_t old = *kpte;
-			pte_t standard = mk_pte(page, PAGE_KERNEL);
 			set_pte_atomic(kpte, mk_pte(page, prot));
-			if (pte_same(old,standard))
-				get_page(kpte_page);
 		} else {
 			struct page *split = split_large_page(address, prot);
 			if (!split)
 				return -ENOMEM;
-			get_page(kpte_page);
 			set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
 			kpte_page = split;
 		}
+		get_page(kpte_page);
 	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
 		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
 		__put_page(kpte_page);
-	}
+	} else
+		BUG();
 
-	if (cpu_has_pse && (page_count(kpte_page) == 1)) {
-		list_add(&kpte_page->lru, &df_list);
-		revert_page(kpte_page, address);
-	}
+	/*
+	 * If the pte was reserved, it means it was created at boot
+	 * time (not via split_large_page) and in turn we must not
+	 * replace it with a largepage.
+	 */
+	if (!PageReserved(kpte_page)) {
+		/* memleak and potential failed 2M page regeneration */
+		BUG_ON(!page_count(kpte_page));
+
+		if (cpu_has_pse && (page_count(kpte_page) == 1)) {
+			list_add(&kpte_page->lru, &df_list);
+			revert_page(kpte_page, address);
+		}
+	}
 	return 0;
 }
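
The hunk above is the i386-style variant of __change_page_attr() (it uses set_pte_atomic, set_pmd_pte, cpu_has_pse and the df_list deferred-free list). Its revert path now skips page-table pages marked reserved: those were created at boot rather than by split_large_page(), so they must never be collapsed into a large page, and for the remaining pages a zero reference count is treated as a bug. The user-space sketch below models just that decision; the function and parameter names are invented for illustration.

/* Illustrative model of the i386 revert check: reserved (boot-time)
 * page-table pages are left alone; otherwise a zero count is a bug and
 * a count of 1 allows merging back to a large page if the CPU has PSE. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool should_revert_i386(bool page_reserved, bool cpu_has_pse,
			       int page_count)
{
	if (page_reserved)
		return false;           /* boot-time table: never collapse */

	assert(page_count != 0);        /* mirrors BUG_ON(!page_count(kpte_page)) */

	return cpu_has_pse && page_count == 1;
}

int main(void)
{
	printf("%d\n", should_revert_i386(true,  true, 1));  /* 0: reserved       */
	printf("%d\n", should_revert_i386(false, true, 3));  /* 0: ptes still set */
	printf("%d\n", should_revert_i386(false, true, 1));  /* 1: revert         */
	return 0;
}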
@@ -131,28 +131,36 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	kpte_flags = pte_val(*kpte);
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
 		if ((kpte_flags & _PAGE_PSE) == 0) {
-			pte_t old = *kpte;
-			pte_t standard = pfn_pte(pfn, ref_prot);
 			set_pte(kpte, pfn_pte(pfn, prot));
-			if (pte_same(old,standard))
-				get_page(kpte_page);
 		} else {
+			/*
+			 * split_large_page will take the reference for this change_page_attr
+			 * on the split page.
+			 */
 			struct page *split = split_large_page(address, prot, ref_prot);
 			if (!split)
 				return -ENOMEM;
-			get_page(split);
 			set_pte(kpte,mk_pte(split, ref_prot));
 			kpte_page = split;
 		}
+		get_page(kpte_page);
 	} else if ((kpte_flags & _PAGE_PSE) == 0) {
 		set_pte(kpte, pfn_pte(pfn, ref_prot));
 		__put_page(kpte_page);
-	}
+	} else
+		BUG();
 
-	if (page_count(kpte_page) == 1) {
+	/* on x86-64 the direct mapping set at boot is not using 4k pages */
+	BUG_ON(PageReserved(kpte_page));
+
+	switch (page_count(kpte_page)) {
+	case 1:
 		save_page(address, kpte_page);
 		revert_page(address, ref_prot);
-	}
+		break;
+	case 0:
+		BUG(); /* memleak and failed 2M page regeneration */
+	}
 	return 0;
 }
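
The hunk above is the x86-64-style variant (it takes a pfn and a ref_prot and uses save_page()/revert_page()). Unlike i386, where boot-created page tables may legitimately map the kernel with 4k ptes and are simply skipped, the added comment notes that the x86-64 boot-time direct mapping does not use 4k pages here, so a reserved kpte page is itself treated as a bug and the revert decision becomes a switch on the remaining reference count. Another small user-space sketch, again with invented names:

/* Illustrative model of the x86-64 revert check: count 1 means only the
 * baseline reference remains (regenerate the 2M mapping); count 0 means
 * a reference was leaked. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum revert_action { KEEP_4K_TABLE, REVERT_TO_2M, REFCOUNT_BUG };

static enum revert_action revert_decision_x86_64(bool page_reserved,
						 int page_count)
{
	assert(!page_reserved);         /* mirrors BUG_ON(PageReserved(kpte_page)) */

	switch (page_count) {
	case 1:
		return REVERT_TO_2M;    /* save_page() + revert_page() */
	case 0:
		return REFCOUNT_BUG;    /* memleak / failed 2M regeneration */
	default:
		return KEEP_4K_TABLE;   /* some ptes are still non-standard */
	}
}

int main(void)
{
	printf("%d\n", revert_decision_x86_64(false, 3)); /* 0: keep 4k table */
	printf("%d\n", revert_decision_x86_64(false, 1)); /* 1: revert to 2M  */
	printf("%d\n", revert_decision_x86_64(false, 0)); /* 2: would BUG()   */
	return 0;
}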