Commit 24e6d5a5 authored by Christoph Hellwig, committed by Dan Williams

mm: pass the vmem_altmap to arch_add_memory and __add_pages

We can just pass this on instead of having to do a radix tree lookup
without proper locking 2 levels into the callchain.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 55ce6e23
...@@ -647,13 +647,14 @@ mem_init (void) ...@@ -647,13 +647,14 @@ mem_init (void)
} }
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret; int ret;
ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
if (ret) if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n", printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret); __func__, ret);
......
...@@ -127,7 +127,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) ...@@ -127,7 +127,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV; return -ENODEV;
} }
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
...@@ -144,7 +145,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) ...@@ -144,7 +145,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
return -EFAULT; return -EFAULT;
} }
return __add_pages(nid, start_pfn, nr_pages, want_memblock); return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
} }
#ifdef CONFIG_MEMORY_HOTREMOVE #ifdef CONFIG_MEMORY_HOTREMOVE
......
...@@ -222,7 +222,8 @@ device_initcall(s390_cma_mem_init); ...@@ -222,7 +222,8 @@ device_initcall(s390_cma_mem_init);
#endif /* CONFIG_CMA */ #endif /* CONFIG_CMA */
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long start_pfn = PFN_DOWN(start); unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size); unsigned long size_pages = PFN_DOWN(size);
...@@ -232,7 +233,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) ...@@ -232,7 +233,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
if (rc) if (rc)
return rc; return rc;
rc = __add_pages(nid, start_pfn, size_pages, want_memblock); rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
if (rc) if (rc)
vmem_remove_mapping(start, size); vmem_remove_mapping(start, size);
return rc; return rc;
......
...@@ -485,14 +485,15 @@ void free_initrd_mem(unsigned long start, unsigned long end) ...@@ -485,14 +485,15 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif #endif
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long start_pfn = PFN_DOWN(start); unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret; int ret;
/* We only have ZONE_NORMAL, so this is easy.. */ /* We only have ZONE_NORMAL, so this is easy.. */
ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
if (unlikely(ret)) if (unlikely(ret))
printk("%s: Failed, __add_pages() == %d\n", __func__, ret); printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
......
...@@ -829,12 +829,13 @@ void __init mem_init(void) ...@@ -829,12 +829,13 @@ void __init mem_init(void)
} }
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
return __add_pages(nid, start_pfn, nr_pages, want_memblock); return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
} }
#ifdef CONFIG_MEMORY_HOTREMOVE #ifdef CONFIG_MEMORY_HOTREMOVE
......
...@@ -772,12 +772,12 @@ static void update_end_of_memory_vars(u64 start, u64 size) ...@@ -772,12 +772,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
} }
} }
int add_pages(int nid, unsigned long start_pfn, int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
unsigned long nr_pages, bool want_memblock) struct vmem_altmap *altmap, bool want_memblock)
{ {
int ret; int ret;
ret = __add_pages(nid, start_pfn, nr_pages, want_memblock); ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
WARN_ON_ONCE(ret); WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */ /* update max_pfn, max_low_pfn and high_memory */
...@@ -787,14 +787,15 @@ int add_pages(int nid, unsigned long start_pfn, ...@@ -787,14 +787,15 @@ int add_pages(int nid, unsigned long start_pfn,
return ret; return ret;
} }
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
init_memory_mapping(start, start + size); init_memory_mapping(start, start + size);
return add_pages(nid, start_pfn, nr_pages, want_memblock); return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
} }
#define PAGE_INUSE 0xFD #define PAGE_INUSE 0xFD
......
...@@ -13,6 +13,7 @@ struct pglist_data; ...@@ -13,6 +13,7 @@ struct pglist_data;
struct mem_section; struct mem_section;
struct memory_block; struct memory_block;
struct resource; struct resource;
struct vmem_altmap;
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
/* /*
...@@ -131,18 +132,19 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn, ...@@ -131,18 +132,19 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
#endif /* CONFIG_MEMORY_HOTREMOVE */ #endif /* CONFIG_MEMORY_HOTREMOVE */
/* reasonably generic interface to expand the physical pages */ /* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
unsigned long nr_pages, bool want_memblock); struct vmem_altmap *altmap, bool want_memblock);
#ifndef CONFIG_ARCH_HAS_ADD_PAGES #ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn, static inline int add_pages(int nid, unsigned long start_pfn,
unsigned long nr_pages, bool want_memblock) unsigned long nr_pages, struct vmem_altmap *altmap,
bool want_memblock)
{ {
return __add_pages(nid, start_pfn, nr_pages, want_memblock); return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
} }
#else /* ARCH_HAS_ADD_PAGES */ #else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
unsigned long nr_pages, bool want_memblock); struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */ #endif /* ARCH_HAS_ADD_PAGES */
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
...@@ -318,7 +320,8 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -318,7 +320,8 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *)); void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online); extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock); extern int arch_add_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages); unsigned long nr_pages);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
......
...@@ -382,6 +382,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, ...@@ -382,6 +382,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (altmap) { if (altmap) {
memcpy(&page_map->altmap, altmap, sizeof(*altmap)); memcpy(&page_map->altmap, altmap, sizeof(*altmap));
pgmap->altmap = &page_map->altmap; pgmap->altmap = &page_map->altmap;
altmap = pgmap->altmap;
} }
pgmap->ref = ref; pgmap->ref = ref;
pgmap->res = &page_map->res; pgmap->res = &page_map->res;
...@@ -427,7 +428,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, ...@@ -427,7 +428,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
goto err_pfn_remap; goto err_pfn_remap;
mem_hotplug_begin(); mem_hotplug_begin();
error = arch_add_memory(nid, align_start, align_size, false); error = arch_add_memory(nid, align_start, align_size, altmap, false);
if (!error) if (!error)
move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
align_start >> PAGE_SHIFT, align_start >> PAGE_SHIFT,
......
...@@ -931,10 +931,11 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) ...@@ -931,10 +931,11 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
* want the linear mapping and thus use arch_add_memory(). * want the linear mapping and thus use arch_add_memory().
*/ */
if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC) if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
ret = arch_add_memory(nid, align_start, align_size, false); ret = arch_add_memory(nid, align_start, align_size, NULL,
false);
else else
ret = add_pages(nid, align_start >> PAGE_SHIFT, ret = add_pages(nid, align_start >> PAGE_SHIFT,
align_size >> PAGE_SHIFT, false); align_size >> PAGE_SHIFT, NULL, false);
if (ret) { if (ret) {
mem_hotplug_done(); mem_hotplug_done();
goto error_add_memory; goto error_add_memory;
......
...@@ -292,18 +292,17 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn, ...@@ -292,18 +292,17 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
* add the new pages. * add the new pages.
*/ */
int __ref __add_pages(int nid, unsigned long phys_start_pfn, int __ref __add_pages(int nid, unsigned long phys_start_pfn,
unsigned long nr_pages, bool want_memblock) unsigned long nr_pages, struct vmem_altmap *altmap,
bool want_memblock)
{ {
unsigned long i; unsigned long i;
int err = 0; int err = 0;
int start_sec, end_sec; int start_sec, end_sec;
struct vmem_altmap *altmap;
/* during initialize mem_map, align hot-added range to section */ /* during initialize mem_map, align hot-added range to section */
start_sec = pfn_to_section_nr(phys_start_pfn); start_sec = pfn_to_section_nr(phys_start_pfn);
end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
if (altmap) { if (altmap) {
/* /*
* Validate altmap is within bounds of the total request * Validate altmap is within bounds of the total request
...@@ -1148,7 +1147,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) ...@@ -1148,7 +1147,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
} }
/* call arch's memory hotadd */ /* call arch's memory hotadd */
ret = arch_add_memory(nid, start, size, true); ret = arch_add_memory(nid, start, size, NULL, true);
if (ret < 0) if (ret < 0)
goto error; goto error;
......
[end of commit diff]