Commit 24b6d416 authored by Christoph Hellwig, committed by Dan Williams

mm: pass the vmem_altmap to vmemmap_free

We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent da024512
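
In short, the series replaces a per-callee lookup with an explicit parameter. A minimal sketch of the two patterns, distilled from the hunks below (the _before/_after names are illustrative, not the kernel's exact code):

/* Before: each leaf helper re-derived the altmap itself via an
 * unlocked radix tree walk (to_vmem_altmap) deep in the callchain. */
static void free_pagetable_before(struct page *page, int order)
{
	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);

	if (altmap)
		vmem_altmap_free(altmap, 1 << order);
	/* ... otherwise fall through to the normal page freeing path ... */
}

/* After: the caller that already holds the altmap threads it down,
 * so no lookup (and no unlocked radix tree access) is needed. */
static void free_pagetable_after(struct page *page, int order,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, 1 << order);
	/* ... otherwise fall through to the normal page freeing path ... */
}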
@@ -696,7 +696,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return 0;
 }
 #endif /* CONFIG_ARM64_64K_PAGES */
-void vmemmap_free(unsigned long start, unsigned long end)
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
...
@@ -760,7 +760,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return vmemmap_populate_basepages(start, end, node);
 }
-void vmemmap_free(unsigned long start, unsigned long end)
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
 }
 #endif
@@ -254,7 +254,8 @@ static unsigned long vmemmap_list_free(unsigned long start)
 	return vmem_back->phys;
 }
-void __ref vmemmap_free(unsigned long start, unsigned long end)
+void __ref vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 	unsigned long page_order = get_order(page_size);
@@ -265,7 +266,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 	for (; start < end; start += page_size) {
 		unsigned long nr_pages, addr;
-		struct vmem_altmap *altmap;
 		struct page *section_base;
 		struct page *page;
@@ -285,7 +285,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
-		altmap = to_vmem_altmap((unsigned long) section_base);
 		if (altmap) {
 			vmem_altmap_free(altmap, nr_pages);
 		} else if (PageReserved(page)) {
...
@@ -297,7 +297,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return ret;
 }
-void vmemmap_free(unsigned long start, unsigned long end)
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
 }
...
@@ -2671,7 +2671,8 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 	return 0;
 }
-void vmemmap_free(unsigned long start, unsigned long end)
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
...
@@ -800,11 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 #define PAGE_INUSE 0xFD
-static void __meminit free_pagetable(struct page *page, int order)
+static void __meminit free_pagetable(struct page *page, int order,
+		struct vmem_altmap *altmap)
 {
 	unsigned long magic;
 	unsigned int nr_pages = 1 << order;
-	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
 	if (altmap) {
 		vmem_altmap_free(altmap, nr_pages);
@@ -826,7 +826,8 @@ static void __meminit free_pagetable(struct page *page, int order)
 	free_pages((unsigned long)page_address(page), order);
 }
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
+		struct vmem_altmap *altmap)
 {
 	pte_t *pte;
 	int i;
@@ -838,13 +839,14 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 	}
 	/* free a pte talbe */
-	free_pagetable(pmd_page(*pmd), 0);
+	free_pagetable(pmd_page(*pmd), 0, altmap);
 	spin_lock(&init_mm.page_table_lock);
 	pmd_clear(pmd);
 	spin_unlock(&init_mm.page_table_lock);
 }
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
+		struct vmem_altmap *altmap)
 {
 	pmd_t *pmd;
 	int i;
@@ -856,13 +858,14 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 	}
 	/* free a pmd talbe */
-	free_pagetable(pud_page(*pud), 0);
+	free_pagetable(pud_page(*pud), 0, altmap);
 	spin_lock(&init_mm.page_table_lock);
 	pud_clear(pud);
 	spin_unlock(&init_mm.page_table_lock);
 }
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
+		struct vmem_altmap *altmap)
 {
 	pud_t *pud;
 	int i;
@@ -874,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 	}
 	/* free a pud talbe */
-	free_pagetable(p4d_page(*p4d), 0);
+	free_pagetable(p4d_page(*p4d), 0, altmap);
 	spin_lock(&init_mm.page_table_lock);
 	p4d_clear(p4d);
 	spin_unlock(&init_mm.page_table_lock);
@@ -882,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
-		 bool direct)
+		 struct vmem_altmap *altmap, bool direct)
 {
 	unsigned long next, pages = 0;
 	pte_t *pte;
@@ -913,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 			 * freed when offlining, or simplely not in use.
 			 */
 			if (!direct)
-				free_pagetable(pte_page(*pte), 0);
+				free_pagetable(pte_page(*pte), 0, altmap);
 			spin_lock(&init_mm.page_table_lock);
 			pte_clear(&init_mm, addr, pte);
@@ -936,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 			page_addr = page_address(pte_page(*pte));
 			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-				free_pagetable(pte_page(*pte), 0);
+				free_pagetable(pte_page(*pte), 0, altmap);
 				spin_lock(&init_mm.page_table_lock);
 				pte_clear(&init_mm, addr, pte);
@@ -953,7 +956,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 static void __meminit
 remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
-		 bool direct)
+		 bool direct, struct vmem_altmap *altmap)
 {
 	unsigned long next, pages = 0;
 	pte_t *pte_base;
@@ -972,7 +975,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 			    IS_ALIGNED(next, PMD_SIZE)) {
 				if (!direct)
 					free_pagetable(pmd_page(*pmd),
-						       get_order(PMD_SIZE));
+						       get_order(PMD_SIZE),
+						       altmap);
 				spin_lock(&init_mm.page_table_lock);
 				pmd_clear(pmd);
@@ -986,7 +990,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 						PMD_SIZE)) {
 					free_pagetable(pmd_page(*pmd),
-						       get_order(PMD_SIZE));
+						       get_order(PMD_SIZE),
+						       altmap);
 					spin_lock(&init_mm.page_table_lock);
 					pmd_clear(pmd);
@@ -998,8 +1003,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 		}
 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-		remove_pte_table(pte_base, addr, next, direct);
-		free_pte_table(pte_base, pmd);
+		remove_pte_table(pte_base, addr, next, altmap, direct);
+		free_pte_table(pte_base, pmd, altmap);
 	}
 	/* Call free_pmd_table() in remove_pud_table(). */
@@ -1009,7 +1014,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 static void __meminit
 remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
-		 bool direct)
+		 struct vmem_altmap *altmap, bool direct)
 {
 	unsigned long next, pages = 0;
 	pmd_t *pmd_base;
@@ -1028,7 +1033,8 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 			    IS_ALIGNED(next, PUD_SIZE)) {
 				if (!direct)
 					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE));
+						       get_order(PUD_SIZE),
+						       altmap);
 				spin_lock(&init_mm.page_table_lock);
 				pud_clear(pud);
@@ -1042,7 +1048,8 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 						PUD_SIZE)) {
 					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE));
+						       get_order(PUD_SIZE),
+						       altmap);
 					spin_lock(&init_mm.page_table_lock);
 					pud_clear(pud);
@@ -1054,8 +1061,8 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 		}
 		pmd_base = pmd_offset(pud, 0);
-		remove_pmd_table(pmd_base, addr, next, direct);
-		free_pmd_table(pmd_base, pud);
+		remove_pmd_table(pmd_base, addr, next, direct, altmap);
+		free_pmd_table(pmd_base, pud, altmap);
 	}
 	if (direct)
@@ -1064,7 +1071,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 static void __meminit
 remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
-		 bool direct)
+		 struct vmem_altmap *altmap, bool direct)
 {
 	unsigned long next, pages = 0;
 	pud_t *pud_base;
@@ -1080,14 +1087,14 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 		BUILD_BUG_ON(p4d_large(*p4d));
 		pud_base = pud_offset(p4d, 0);
-		remove_pud_table(pud_base, addr, next, direct);
+		remove_pud_table(pud_base, addr, next, altmap, direct);
 		/*
 		 * For 4-level page tables we do not want to free PUDs, but in the
 		 * 5-level case we should free them. This code will have to change
 		 * to adapt for boot-time switching between 4 and 5 level page tables.
 		 */
 		if (CONFIG_PGTABLE_LEVELS == 5)
-			free_pud_table(pud_base, p4d);
+			free_pud_table(pud_base, p4d, altmap);
 	}
 	if (direct)
@@ -1096,7 +1103,8 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 /* start and end are both virtual address. */
 static void __meminit
-remove_pagetable(unsigned long start, unsigned long end, bool direct)
+remove_pagetable(unsigned long start, unsigned long end, bool direct,
+		struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	unsigned long addr;
@@ -1111,15 +1119,16 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
 			continue;
 		p4d = p4d_offset(pgd, 0);
-		remove_p4d_table(p4d, addr, next, direct);
+		remove_p4d_table(p4d, addr, next, altmap, direct);
 	}
 	flush_tlb_all();
 }
-void __ref vmemmap_free(unsigned long start, unsigned long end)
+void __ref vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
-	remove_pagetable(start, end, false);
+	remove_pagetable(start, end, false, altmap);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -1129,7 +1138,7 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 	start = (unsigned long)__va(start);
 	end = (unsigned long)__va(end);
-	remove_pagetable(start, end, true);
+	remove_pagetable(start, end, true, NULL);
 }
 int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
...
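
Taken together, the x86 hunks thread the altmap through the entire page-table teardown. The resulting call chain, sketched from the signatures above (argument lists abbreviated):

/*
 * vmemmap_free(start, end, altmap)
 *   remove_pagetable(start, end, false, altmap)
 *     remove_p4d_table(..., altmap, direct)
 *       remove_pud_table(..., altmap, direct)
 *         remove_pmd_table(..., direct, altmap)
 *           remove_pte_table(..., altmap, direct)
 *           free_pte_table(..., altmap)
 *             free_pagetable(page, order, altmap)
 *               vmem_altmap_free(altmap, nr_pages)   // when altmap != NULL
 *
 * kernel_physical_mapping_remove() passes NULL for the altmap,
 * as shown in the last hunk above.
 */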
@@ -331,7 +331,7 @@ extern void remove_memory(int nid, u64 start, u64 size);
 extern int sparse_add_one_section(struct pglist_data *pgdat,
 		unsigned long start_pfn, struct vmem_altmap *altmap);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset);
+		unsigned long map_offset, struct vmem_altmap *altmap);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
 extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
...
@@ -2561,7 +2561,8 @@ int vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap);
 void vmemmap_populate_print_last(void);
 #ifdef CONFIG_MEMORY_HOTPLUG
-void vmemmap_free(unsigned long start, unsigned long end);
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap);
 #endif
 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
 				  unsigned long nr_pages);
...
@@ -536,7 +536,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
 }
 static int __remove_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset)
+		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn;
 	int scn_nr;
@@ -553,7 +553,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
 	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
 	__remove_zone(zone, start_pfn);
-	sparse_remove_one_section(zone, ms, map_offset);
+	sparse_remove_one_section(zone, ms, map_offset, altmap);
 	return 0;
 }
@@ -607,7 +607,8 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < sections_to_remove; i++) {
 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
-		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
+		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
+				       altmap);
 		map_offset = 0;
 		if (ret)
 			break;
...
@@ -685,12 +685,13 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, altmap);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static void free_map_bootmem(struct page *memmap)
@@ -698,7 +699,7 @@ static void free_map_bootmem(struct page *memmap)
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, NULL);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
@@ -729,7 +730,8 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
 	return __kmalloc_section_memmap();
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
@@ -798,7 +800,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 		return -ENOMEM;
 	}
@@ -820,7 +822,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 	}
 	return ret;
 }
@@ -847,7 +849,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
-static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+static void free_section_usemap(struct page *memmap, unsigned long *usemap,
+		struct vmem_altmap *altmap)
 {
 	struct page *usemap_page;
@@ -861,7 +864,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap);
+			__kfree_section_memmap(memmap, altmap);
 		return;
 	}
@@ -875,7 +878,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 }
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset)
+		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
 	unsigned long *usemap = NULL, flags;
@@ -893,7 +896,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 	clear_hwpoisoned_pages(memmap + map_offset,
 			       PAGES_PER_SECTION - map_offset);
-	free_section_usemap(memmap, usemap);
+	free_section_usemap(memmap, usemap, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
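
On the generic MM side, the altmap now flows from the hot-remove entry point all the way down to vmemmap_free(). A sketch assembled from the hunks above (arguments abbreviated; it assumes the parent commit in this series already added the altmap argument to arch_remove_memory() and __remove_pages()):

/*
 * arch_remove_memory(start, size, altmap)
 *   __remove_pages(zone, start_pfn, nr_pages, altmap)
 *     __remove_section(zone, ms, map_offset, altmap)
 *       sparse_remove_one_section(zone, ms, map_offset, altmap)
 *         free_section_usemap(memmap, usemap, altmap)
 *           __kfree_section_memmap(memmap, altmap)
 *             vmemmap_free(start, end, altmap)
 *
 * free_map_bootmem() passes NULL, since a boot-time memmap is not
 * altmap-backed.
 */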