Commit a99583e7 authored by Christoph Hellwig, committed by Dan Williams

mm: pass the vmem_altmap to memmap_init_zone

Pass the vmem_altmap two levels down instead of needing a lookup.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 24b6d416
@@ -501,7 +501,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
 				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMMAP_EARLY);
+				 MEMMAP_EARLY, NULL);
 	return 0;
 }
@@ -509,9 +509,10 @@ void __meminit
 memmap_init (unsigned long size, int nid, unsigned long zone,
 	     unsigned long start_pfn)
 {
-	if (!vmem_map)
-		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
-	else {
+	if (!vmem_map) {
+		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
+				 NULL);
+	} else {
 		struct page *start;
 		struct memmap_init_callback_data args;
...
@@ -324,7 +324,7 @@ extern int add_memory_resource(int nid, struct resource *resource, bool online);
 extern int arch_add_memory(int nid, u64 start, u64 size,
 		struct vmem_altmap *altmap, bool want_memblock);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
-		unsigned long nr_pages);
+		unsigned long nr_pages, struct vmem_altmap *altmap);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern void remove_memory(int nid, u64 start, u64 size);
...
@@ -2069,8 +2069,8 @@ static inline void zero_resv_unavail(void) {}
 #endif
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long,
-		unsigned long, enum memmap_context);
+extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
+		enum memmap_context, struct vmem_altmap *);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
...
@@ -432,7 +432,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (!error)
 		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 					align_start >> PAGE_SHIFT,
-					align_size >> PAGE_SHIFT);
+					align_size >> PAGE_SHIFT, altmap);
 	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;
...
@@ -942,7 +942,7 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
 	}
 	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT);
+				align_size >> PAGE_SHIFT, NULL);
 	mem_hotplug_done();
 	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
...
@@ -798,8 +798,8 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
 }
-void __ref move_pfn_range_to_zone(struct zone *zone,
-		unsigned long start_pfn, unsigned long nr_pages)
+void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nid = pgdat->node_id;
@@ -824,7 +824,8 @@ void __ref move_pfn_range_to_zone(struct zone *zone,
 	 * expects the zone spans the pfn range. All the pages in the range
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
-	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
+	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+			 MEMMAP_HOTPLUG, altmap);
 	set_zone_contiguous(zone);
 }
@@ -896,7 +897,7 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
 	struct zone *zone;
 	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
-	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
+	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
 	return zone;
 }
...
@@ -5314,9 +5314,9 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn, enum memmap_context context)
+		unsigned long start_pfn, enum memmap_context context,
+		struct vmem_altmap *altmap)
 {
-	struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
 	unsigned long end_pfn = start_pfn + size;
 	pg_data_t *pgdat = NODE_DATA(nid);
 	unsigned long pfn;
@@ -5417,7 +5417,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
 #endif
 static int zone_batchsize(struct zone *zone)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment