Commit 467ba14e authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s/radix: Fix huge vmap false positive

pmd_huge() is defined to return false when HUGETLB_PAGE is not
configured, but the vmap code still installs huge PMDs. This leads to
false bad PMD errors when vunmapping, because the huge PMD is not
recognised as a huge PTE and the bad PMD check catches it instead. The
end result may not be much more serious than some bad pmd warning
messages, because pmd_none_or_clear_bad() does what we wanted and
clears the huge PTE anyway.
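
To make the failure mode concrete, here is a condensed sketch of the old
radix pmd_clear_huge() together with the generic vunmap walk it feeds
into, assuming the !CONFIG_HUGETLB_PAGE stub in include/linux/hugetlb.h
(simplified for illustration; not the exact kernel source):

	/* With CONFIG_HUGETLB_PAGE=n, the generic header stubs this out: */
	#define pmd_huge(pmd)	0

	/* The old radix pmd_clear_huge() keyed off that stub: */
	int pmd_clear_huge(pmd_t *pmd)
	{
		if (pmd_huge(*pmd)) {	/* always false in this config */
			pmd_clear(pmd);
			return 1;
		}
		return 0;		/* huge vmap PMD left in place */
	}

	/*
	 * The vunmap walk (vunmap_pmd_range) then falls through to
	 * pmd_none_or_clear_bad(): the leaf PMD does not look like a
	 * valid page table pointer, so pmd_bad() fires, a "bad pmd"
	 * warning is printed, and the entry is cleared anyway.
	 */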

Fix this by checking pmd_is_leaf(), which checks for a PTE regardless of
config options. The whole huge/large/leaf stuff is a tangled mess but
that's kernel-wide and not something we can improve much in arch/powerpc
code.
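
For reference, pmd_is_leaf() on book3s/64 boils down to testing the
_PAGE_PTE software bit, which is set on any leaf entry regardless of
config options; roughly (a sketch, not verbatim):

	/* Sketch: a leaf entry carries _PAGE_PTE, a table pointer does
	 * not, so this works whether or not HUGETLB_PAGE is configured. */
	static inline bool pmd_is_leaf(pmd_t pmd)
	{
		return !!(pte_raw(pmd_pte(pmd)) & cpu_to_be64(_PAGE_PTE));
	}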

pmd_page(), pud_page(), etc., called by vmalloc_to_page() on huge vmaps
can similarly trigger a false VM_WARN_ON when CONFIG_HUGETLB_PAGE=n, so
those checks are adjusted. The checks were added by commit d6eacedd
("powerpc/book3s: Use config independent helpers for page table walk")
while implementing a similar fix for other page table walking functions.
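
As an illustration of how those helpers are reached, a condensed sketch
of the PMD step of the generic vmalloc_to_page() walk (simplified from
mm/vmalloc.c, error handling dropped):

	/* On a huge vmap leaf, the page is resolved via pmd_page(),
	 * which is where the config-dependent VM_WARN_ON used to fire. */
	pmd = pmd_offset(pud, addr);
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	/* ...otherwise descend to the PTE level as usual... */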

Fixes: d909f910 ("powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP")
Cc: stable@vger.kernel.org # v5.3+
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211216103342.609192-1-npiggin@gmail.com
parent a605b39e
arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1076,7 +1076,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-	if (pud_huge(*pud)) {
+	if (pud_is_leaf(*pud)) {
 		pud_clear(pud);
 		return 1;
 	}
@@ -1123,7 +1123,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (pmd_huge(*pmd)) {
+	if (pmd_is_leaf(*pmd)) {
 		pmd_clear(pmd);
 		return 1;
 	}
arch/powerpc/mm/pgtable_64.c
@@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
 struct page *p4d_page(p4d_t p4d)
 {
 	if (p4d_is_leaf(p4d)) {
-		VM_WARN_ON(!p4d_huge(p4d));
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!p4d_huge(p4d));
 		return pte_page(p4d_pte(p4d));
 	}
 	return virt_to_page(p4d_pgtable(p4d));
@@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d)
 struct page *pud_page(pud_t pud)
 {
 	if (pud_is_leaf(pud)) {
-		VM_WARN_ON(!pud_huge(pud));
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!pud_huge(pud));
 		return pte_page(pud_pte(pud));
 	}
 	return virt_to_page(pud_pgtable(pud));
@@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud)
 struct page *pmd_page(pmd_t pmd)
 {
 	if (pmd_is_leaf(pmd)) {
-		VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+		/*
+		 * vmalloc_to_page may be called on any vmap address (not only
+		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
+		 * enabled so these checks can't be used.
+		 */
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
 		return pte_page(pmd_pte(pmd));
 	}
 	return virt_to_page(pmd_page_vaddr(pmd));