Commit 692d73d2 authored by Mike Rapoport (Microsoft), committed by Andrew Morton

mm: numa_memblks: introduce numa_memblks_init

Move most of x86::numa_init() to numa_memblks so that the latter will be
more self-contained.

With this, numa_memblk data structures no longer need to be exposed to
architecture-specific code.
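
For orientation, a minimal sketch of what an architecture's early NUMA setup
reduces to after this change. It mirrors the x86 hunk below, with error paths
and the APIC-to-node bookkeeping trimmed; numa_register_nodes() is the renamed
x86 helper introduced by this patch, not a generic API.

	static int __init numa_init(int (*init_func)(void))
	{
		int ret;

		/*
		 * Let the core numa_memblks code drive firmware parsing via
		 * init_func(), then clean up, emulate and register numa_meminfo.
		 */
		ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
		if (ret < 0)
			return ret;

		/* Arch side: validate memblock coverage and bring up the nodes. */
		return numa_register_nodes();
	}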

Link: https://lkml.kernel.org/r/20240807064110.1003856-21-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Tested-by: Zi Yan <ziy@nvidia.com> # for x86_64 and arm64
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> [arm64 + CXL via QEMU]
Acked-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Rob Herring (Arm) <robh@kernel.org>
Cc: Samuel Holland <samuel.holland@sifive.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b0c4e27c
@@ -115,13 +115,9 @@ void __init setup_node_to_cpumask_map(void)
 	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
-static int __init numa_register_memblks(struct numa_meminfo *mi)
+static int __init numa_register_nodes(void)
 {
-	int nid, err;
-
-	err = numa_register_meminfo(mi);
-	if (err)
-		return err;
+	int nid;
 
 	if (!memblock_validate_numa_coverage(SZ_1M))
 		return -EINVAL;
@@ -175,39 +171,11 @@ static int __init numa_init(int (*init_func)(void))
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		set_apicid_to_node(i, NUMA_NO_NODE);
 
-	nodes_clear(numa_nodes_parsed);
-	nodes_clear(node_possible_map);
-	nodes_clear(node_online_map);
-	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
-				  NUMA_NO_NODE));
-	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
-				  NUMA_NO_NODE));
-	/* In case that parsing SRAT failed. */
-	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
-	numa_reset_distance();
-
-	ret = init_func();
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * We reset memblock back to the top-down direction
-	 * here because if we configured ACPI_NUMA, we have
-	 * parsed SRAT in init_func(). It is ok to have the
-	 * reset here even if we did't configure ACPI_NUMA
-	 * or acpi numa init fails and fallbacks to dummy
-	 * numa init.
-	 */
-	memblock_set_bottom_up(false);
-
-	ret = numa_cleanup_meminfo(&numa_meminfo);
+	ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
 	if (ret < 0)
 		return ret;
 
-	numa_emulation(&numa_meminfo, numa_distance_cnt);
-
-	ret = numa_register_memblks(&numa_meminfo);
+	ret = numa_register_nodes();
 	if (ret < 0)
 		return ret;
 
@@ -34,6 +34,9 @@ int __init numa_register_meminfo(struct numa_meminfo *mi);
 void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
 				       const struct numa_meminfo *mi);
 
+int __init numa_memblks_init(int (*init_func)(void),
+			     bool memblock_force_top_down);
+
 #ifdef CONFIG_NUMA_EMU
 int numa_emu_cmdline(char *str);
 void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
@@ -415,6 +415,47 @@ int __init numa_register_meminfo(struct numa_meminfo *mi)
 	return 0;
 }
 
+int __init numa_memblks_init(int (*init_func)(void),
+			     bool memblock_force_top_down)
+{
+	int ret;
+
+	nodes_clear(numa_nodes_parsed);
+	nodes_clear(node_possible_map);
+	nodes_clear(node_online_map);
+	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
+				  NUMA_NO_NODE));
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
+				  NUMA_NO_NODE));
+	/* In case that parsing SRAT failed. */
+	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
+	numa_reset_distance();
+
+	ret = init_func();
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * We reset memblock back to the top-down direction
+	 * here because if we configured ACPI_NUMA, we have
+	 * parsed SRAT in init_func(). It is ok to have the
+	 * reset here even if we did't configure ACPI_NUMA
+	 * or acpi numa init fails and fallbacks to dummy
+	 * numa init.
+	 */
+	if (memblock_force_top_down)
+		memblock_set_bottom_up(false);
+
+	ret = numa_cleanup_meminfo(&numa_meminfo);
+	if (ret < 0)
+		return ret;
+
+	numa_emulation(&numa_meminfo, numa_distance_cnt);
+
+	return numa_register_meminfo(&numa_meminfo);
+}
+
 static int __init cmp_memblk(const void *a, const void *b)
 {
 	const struct numa_memblk *ma = *(const struct numa_memblk **)a;