Commit ebeac3ea authored by Geert Uytterhoeven, committed by Linus Torvalds

mm: move fold_vm_numa_events() to fix NUMA without SMP

If CONFIG_NUMA=y, but CONFIG_SMP=n (e.g. sh/migor_defconfig):

    sh4-linux-gnu-ld: mm/vmstat.o: in function `vmstat_start': vmstat.c:(.text+0x97c): undefined reference to `fold_vm_numa_events'
    sh4-linux-gnu-ld: drivers/base/node.o: in function `node_read_vmstat': node.c:(.text+0x140): undefined reference to `fold_vm_numa_events'
    sh4-linux-gnu-ld: drivers/base/node.o: in function `node_read_numastat': node.c:(.text+0x1d0): undefined reference to `fold_vm_numa_events'

Fix this by moving fold_vm_numa_events() outside the SMP-only section.
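
To make the failure mode concrete, here is a minimal two-file sketch (hypothetical file and function names, not the actual kernel sources): the callers are compiled for any CONFIG_NUMA=y kernel, but the definition previously lived inside the #ifdef CONFIG_SMP block, so a CONFIG_SMP=n build never emits the symbol and the final link fails.

    /* numa_caller.c -- built whenever CONFIG_NUMA=y */
    void fold_vm_numa_events(void);         /* declaration is always visible */

    void read_numastat(void)
    {
            fold_vm_numa_events();          /* creates an external reference */
    }

    /* vmstat_sketch.c -- definition guarded too narrowly */
    #ifdef CONFIG_SMP                       /* with CONFIG_SMP=n this block  */
    void fold_vm_numa_events(void)          /* is never compiled, so the     */
    {                                       /* reference above stays         */
    }                                       /* unresolved at link time       */
    #endif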

Link: https://lkml.kernel.org/r/9d16ccdd9ef32803d7100c84f737de6a749314fb.1631781495.git.geert+renesas@glider.be
Fixes: f19298b9 ("mm/vmstat: convert NUMA statistics to basic NUMA counters")
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Gon Solo <gonsolo@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yoshinori Sato <ysato@users.osdn.me>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 61bb6cd2
@@ -165,6 +165,34 @@ atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
 EXPORT_SYMBOL(vm_zone_stat);
 EXPORT_SYMBOL(vm_node_stat);
 
+#ifdef CONFIG_NUMA
+static void fold_vm_zone_numa_events(struct zone *zone)
+{
+        unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
+        int cpu;
+        enum numa_stat_item item;
+
+        for_each_online_cpu(cpu) {
+                struct per_cpu_zonestat *pzstats;
+
+                pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
+                for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
+                        zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
+        }
+
+        for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
+                zone_numa_event_add(zone_numa_events[item], zone, item);
+}
+
+void fold_vm_numa_events(void)
+{
+        struct zone *zone;
+
+        for_each_populated_zone(zone)
+                fold_vm_zone_numa_events(zone);
+}
+#endif
+
 #ifdef CONFIG_SMP
 int calculate_pressure_threshold(struct zone *zone)
@@ -771,34 +799,6 @@ static int fold_diff(int *zone_diff, int *node_diff)
         return changes;
 }
 
-#ifdef CONFIG_NUMA
-static void fold_vm_zone_numa_events(struct zone *zone)
-{
-        unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
-        int cpu;
-        enum numa_stat_item item;
-
-        for_each_online_cpu(cpu) {
-                struct per_cpu_zonestat *pzstats;
-
-                pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
-                for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
-                        zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
-        }
-
-        for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
-                zone_numa_event_add(zone_numa_events[item], zone, item);
-}
-
-void fold_vm_numa_events(void)
-{
-        struct zone *zone;
-
-        for_each_populated_zone(zone)
-                fold_vm_zone_numa_events(zone);
-}
-#endif
-
 /*
  * Update the zone counters for the current cpu.
  *