Commit 0d0a4bc2 authored by Reza Arbab, committed by Michael Ellerman

powerpc/mm: unstub radix__vmemmap_remove_mapping()

Use remove_pagetable() and friends for radix vmemmap removal.

We do not require the special-case handling of vmemmap done in the x86
versions of these functions. This is because vmemmap_free() has already
freed the mapped pages, and calls us with an aligned address range.

So, add a few failsafe WARNs, but otherwise the code to remove physical
mappings is already sufficient for vmemmap.
Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 4b5d62ca
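
All four hunks below add the same failsafe at successive page-table levels: a huge entry is only cleared when [addr, next) covers the whole mapping, otherwise the code WARNs once and skips it. A minimal userspace sketch of that guard follows; IS_ALIGNED mirrors the kernel macro, but the page and level sizes here are illustrative assumptions, not the kernel's definitions.

#include <assert.h>
#include <stdio.h>

/*
 * Userspace stand-ins for the kernel helpers used in the patch.
 * IS_ALIGNED mirrors include/linux/kernel.h; the sizes below are
 * assumed values for illustration only (64K base page, 2M PMD).
 */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define PAGE_SIZE		(1UL << 16)
#define PMD_SIZE		(1UL << 21)

/*
 * Models the guard added to remove_pmd_table(): a huge-page entry may
 * only be cleared when [addr, next) spans the whole mapping. In the
 * kernel the failure case does WARN_ONCE(...) and continues, since the
 * table walkers have no error path to return through.
 */
static int can_clear_huge(unsigned long addr, unsigned long next,
			  unsigned long level_size)
{
	if (!IS_ALIGNED(addr, level_size) || !IS_ALIGNED(next, level_size))
		return 0;	/* kernel: WARN_ONCE(); continue; */
	return 1;
}

int main(void)
{
	/* vmemmap_free() hands us aligned ranges, so the guard passes: */
	assert(can_clear_huge(0, 2 * PMD_SIZE, PMD_SIZE));
	/* a partial range would trip the failsafe WARN instead: */
	assert(!can_clear_huge(PAGE_SIZE, PMD_SIZE, PMD_SIZE));
	printf("alignment guards behave as expected\n");
	return 0;
}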
@@ -527,6 +527,15 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 		if (!pte_present(*pte))
 			continue;
 
+		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
+			/*
+			 * The vmemmap_free() and remove_section_mapping()
+			 * codepaths call us with aligned addresses.
+			 */
+			WARN_ONCE(1, "%s: unaligned range\n", __func__);
+			continue;
+		}
+
 		pte_clear(&init_mm, addr, pte);
 	}
 }
@@ -546,6 +555,12 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			continue;
 
 		if (pmd_huge(*pmd)) {
+			if (!IS_ALIGNED(addr, PMD_SIZE) ||
+			    !IS_ALIGNED(next, PMD_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pmd);
 			continue;
 		}
@@ -571,6 +586,12 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
 			continue;
 
 		if (pud_huge(*pud)) {
+			if (!IS_ALIGNED(addr, PUD_SIZE) ||
+			    !IS_ALIGNED(next, PUD_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pud);
 			continue;
 		}
@@ -597,6 +618,12 @@ static void remove_pagetable(unsigned long start, unsigned long end)
 			continue;
 
 		if (pgd_huge(*pgd)) {
+			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+			    !IS_ALIGNED(next, PGDIR_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pgd);
 			continue;
 		}
@@ -636,7 +663,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 #ifdef CONFIG_MEMORY_HOTPLUG
 void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 {
-	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+	remove_pagetable(start, start + page_size);
 }
 #endif
 #endif