Commit 1b862aec authored by Michal Hocko, committed by Linus Torvalds

mm, memory_hotplug: get rid of is_zone_device_section

Device memory hotplug hooks into regular memory hotplug only halfway.
It needs memory sections to track struct pages, but there is no
need/desire to associate those sections with memory blocks and export
them to userspace via sysfs because they cannot be onlined anyway.

This is currently expressed by the for_device argument to
arch_add_memory, which makes sure to associate the given memory range
with ZONE_DEVICE.  register_new_memory then relies on
is_zone_device_section to distinguish special memory hotplug from the
regular one.  While this works now, later patches in this series want
to move __add_zone outside of the arch_add_memory path, so we have to
come up with something else.
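
For reference, the pre-patch special-casing looked roughly like this
(condensed from the removal hunks below; the inline comment and the
elided body are an editorial gloss, not kernel code):

	static bool is_zone_device_section(struct mem_section *ms)
	{
		struct page *page;

		page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
		return is_zone_device_page(page);
	}

	int register_new_memory(int nid, struct mem_section *section)
	{
		/* device sections are tracked but never get a memory block */
		if (is_zone_device_section(section))
			return 0;
		...
	}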

Add want_memblock down the __add_pages path and use it to control
whether the section->memblock association should be done.
arch_add_memory then trivially wants a memblock for everything but
for_device hotplug.
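
The resulting flow, condensed from the diff below: every
arch_add_memory simply passes !for_device, and the generic
__add_section checks the flag before registering a memory block:

	/* in arch_add_memory(): request a memblock unless this is device memory */
	ret = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);

	static int __meminit __add_section(int nid, struct zone *zone,
			unsigned long phys_start_pfn, bool want_memblock)
	{
		...
		if (!want_memblock)
			return 0;

		return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
	}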

remove_memory_section doesn't need is_zone_device_section either.  We
can simply skip all the memblock-specific cleanup if there is no
memblock for the given section.
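
Condensed from the diff below, the removal path now relies on
find_memory_block returning NULL for sections that were never
registered:

	mutex_lock(&mem_sysfs_mutex);

	/*
	 * Some users of the memory hotplug do not want/need memblock to
	 * track all sections. Skip over those.
	 */
	mem = find_memory_block(section);
	if (!mem)
		goto out_unlock;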

This shouldn't introduce any functional change.

Link: http://lkml.kernel.org/r/20170515085827.16474-5-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daniel Kiper <daniel.kiper@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Tobias Regnery <tobias.regnery@gmail.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bfe63d3b
@@ -658,7 +658,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 	zone = pgdat->node_zones +
 		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
-	ret = __add_pages(nid, zone, start_pfn, nr_pages);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
...
@@ -151,7 +151,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 	zone = pgdata->node_zones +
 		zone_for_memory(nid, start, size, 0, for_device);
-	return __add_pages(nid, zone, start_pfn, nr_pages);
+	return __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
...
@@ -195,7 +195,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 			continue;
 		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
 			   zone_end_pfn - start_pfn : size_pages;
-		rc = __add_pages(nid, zone, start_pfn, nr_pages);
+		rc = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
 		if (rc)
 			break;
 		start_pfn += nr_pages;
...
@@ -498,7 +498,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 	ret = __add_pages(nid, pgdat->node_zones +
 		zone_for_memory(nid, start, size, ZONE_NORMAL,
 		for_device),
-		start_pfn, nr_pages);
+		start_pfn, nr_pages, !for_device);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
...
@@ -831,7 +831,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	return __add_pages(nid, zone, start_pfn, nr_pages);
+	return __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
...
@@ -787,7 +787,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 	init_memory_mapping(start, start + size);
 
-	ret = __add_pages(nid, zone, start_pfn, nr_pages);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
 	WARN_ON_ONCE(ret);
 
 	/* update max_pfn, max_low_pfn and high_memory */
...
@@ -685,14 +685,6 @@ static int add_memory_block(int base_section_nr)
 	return 0;
 }
 
-static bool is_zone_device_section(struct mem_section *ms)
-{
-	struct page *page;
-
-	page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
-	return is_zone_device_page(page);
-}
-
 /*
  * need an interface for the VM to add new memory regions,
  * but without onlining it.
@@ -702,9 +694,6 @@ int register_new_memory(int nid, struct mem_section *section)
 	int ret = 0;
 	struct memory_block *mem;
 
-	if (is_zone_device_section(section))
-		return 0;
-
 	mutex_lock(&mem_sysfs_mutex);
 	mem = find_memory_block(section);
@@ -741,11 +730,16 @@ static int remove_memory_section(unsigned long node_id,
 {
 	struct memory_block *mem;
 
-	if (is_zone_device_section(section))
-		return 0;
-
 	mutex_lock(&mem_sysfs_mutex);
+
+	/*
+	 * Some users of the memory hotplug do not want/need memblock to
+	 * track all sections. Skip over those.
+	 */
 	mem = find_memory_block(section);
+	if (!mem)
+		goto out_unlock;
+
 	unregister_mem_sect_under_nodes(mem, __section_nr(section));
 	mem->section_count--;
@@ -754,6 +748,7 @@ static int remove_memory_section(unsigned long node_id,
 	else
 		put_device(&mem->dev);
 
+out_unlock:
 	mutex_unlock(&mem_sysfs_mutex);
 	return 0;
 }
...
@@ -111,7 +111,7 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
 /* reasonably generic interface to expand the physical pages in a zone */
 extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
-	unsigned long nr_pages);
+	unsigned long nr_pages, bool want_memblock);
 
 #ifdef CONFIG_NUMA
 extern int memory_add_physaddr_to_nid(u64 start);
...
@@ -494,7 +494,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 }
 
 static int __meminit __add_section(int nid, struct zone *zone,
-		unsigned long phys_start_pfn)
+		unsigned long phys_start_pfn, bool want_memblock)
 {
 	int ret;
@@ -511,6 +511,9 @@ static int __meminit __add_section(int nid, struct zone *zone,
 	if (ret < 0)
 		return ret;
 
+	if (!want_memblock)
+		return 0;
+
 	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
 }
@@ -521,7 +524,7 @@ static int __meminit __add_section(int nid, struct zone *zone,
  * add the new pages.
  */
 int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
-		unsigned long nr_pages)
+		unsigned long nr_pages, bool want_memblock)
 {
 	unsigned long i;
 	int err = 0;
@@ -549,7 +552,7 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
 	}
 
 	for (i = start_sec; i <= end_sec; i++) {
-		err = __add_section(nid, zone, section_nr_to_pfn(i));
+		err = __add_section(nid, zone, section_nr_to_pfn(i), want_memblock);
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
...