Commit e3c2bfdd authored by Aneesh Kumar K.V, committed by Andrew Morton

mm/memory_hotplug: allow memmap on memory hotplug request to fallback

If not supported, fall back to not using memmap on memory. This avoids
the need for callers to do the fallback themselves.

Link: https://lkml.kernel.org/r/20230808091501.287660-3-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 04d5ea46
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -211,7 +211,6 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
 		if (!info->length)
 			continue;
-		if (mhp_supports_memmap_on_memory(info->length))
-			mhp_flags |= MHP_MEMMAP_ON_MEMORY;
+		mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 		result = __add_memory(mgid, info->start_addr, info->length,
 				      mhp_flags);
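
Note: after this change the ACPI driver requests the feature unconditionally
and leaves the decision to the core. A minimal sketch of the caller pattern,
paraphrased from the hunk above (not verbatim source):

	/* Before: each caller had to probe support itself. */
	if (mhp_supports_memmap_on_memory(info->length))
		mhp_flags |= MHP_MEMMAP_ON_MEMORY;

	/* After: the flag is only a hint; the core silently falls back
	 * to a regular memmap when the range does not qualify.
	 */
	mhp_flags |= MHP_MEMMAP_ON_MEMORY;
	result = __add_memory(mgid, info->start_addr, info->length,
			      mhp_flags);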
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -97,6 +97,8 @@ typedef int __bitwise mhp_t;
  * To do so, we will use the beginning of the hot-added range to build
  * the page tables for the memmap array that describes the entire range.
  * Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint, the core kernel can decide to not do this based on
+ * different alignment checks.
  */
 #define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
 /*
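
With the hint semantics, any hotplug caller can pass the flag without a
support check. A hedged sketch using the generic add_memory() entry point
(example_hot_add and its arguments are illustrative, not part of this patch):

	#include <linux/memory_hotplug.h>

	/* Illustrative: ask for a self-hosted memmap; if the core's
	 * alignment checks fail, the memory is still added, just with
	 * a normally allocated memmap.
	 */
	static int example_hot_add(int nid, u64 start, u64 size)
	{
		return add_memory(nid, start, size, MHP_MEMMAP_ON_MEMORY);
	}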
@@ -354,7 +356,6 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid,
 extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
 				      struct mhp_params *params);
 void arch_remove_linear_mapping(u64 start, u64 size);
-extern bool mhp_supports_memmap_on_memory(unsigned long size);
 #endif /* CONFIG_MEMORY_HOTPLUG */
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1247,7 +1247,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
 	return device_online(&mem->dev);
 }
 
-bool mhp_supports_memmap_on_memory(unsigned long size)
+static bool mhp_supports_memmap_on_memory(unsigned long size)
 {
 	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
 	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
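
For context, the checks behind mhp_supports_memmap_on_memory() at this point
in the tree: the feature must be enabled, the request must be exactly one
memory block, the vmemmap must fill whole PMDs, and the remainder must be
pageblock-aligned. A paraphrased sketch (sketch_supports_memmap_on_memory is
a renamed illustration, not the kernel function):

	static bool sketch_supports_memmap_on_memory(unsigned long size)
	{
		unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
		unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
		unsigned long remaining_size = size - vmemmap_size;

		return mhp_memmap_on_memory() &&
		       size == memory_block_size_bytes() &&
		       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
		       IS_ALIGNED(remaining_size,
				  pageblock_nr_pages << PAGE_SHIFT);
	}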
@@ -1339,14 +1339,13 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 	 * Self hosted memmap array
 	 */
 	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
-		if (!mhp_supports_memmap_on_memory(size)) {
-			ret = -EINVAL;
-			goto error;
+		if (mhp_supports_memmap_on_memory(size)) {
+			mhp_altmap.free = PHYS_PFN(size);
+			mhp_altmap.base_pfn = PHYS_PFN(start);
+			params.altmap = &mhp_altmap;
 		}
-
-		mhp_altmap.free = PHYS_PFN(size);
-		mhp_altmap.base_pfn = PHYS_PFN(start);
-		params.altmap = &mhp_altmap;
+		/* fallback to not using altmap */
 	}
 
 	/* call arch's memory hotadd */
 	ret = arch_add_memory(nid, start, size, &params);
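
Worked example of when the altmap path is taken (illustrative numbers,
assuming x86-64 defaults: 128 MiB memory blocks, 4 KiB pages, a 64-byte
struct page, 2 MiB PMDs):

	unsigned long size = 128UL << 20;	/* one memory block	  */
	unsigned long nr_pages = size >> 12;	/* 32768 pages		  */
	unsigned long vmemmap = nr_pages * 64;	/* 2 MiB: PMD-aligned, OK */

Here the remaining 126 MiB is pageblock-aligned, so params.altmap is used
and the memmap lives in the hot-added range itself. On a configuration where
these checks fail, add_memory_resource() now proceeds without the altmap
instead of returning -EINVAL to the caller.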