Commit 72675e13 authored by Michal Hocko, committed by Linus Torvalds

mm, memory_hotplug: drop zone from build_all_zonelists

build_all_zonelists gets a zone parameter to initialize zone's pagesets.
There is only a single user which gives a non-NULL zone parameter and
that one doesn't really need the rest of the build_all_zonelists (see
commit 6dcd73d7 ("memory-hotplug: allocate zone's pcp before
onlining pages")).

Therefore remove setup_zone_pageset from build_all_zonelists and call it
from its only user directly.  This will also remove a pointless zonelists
rebuilding which is always good.
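
For orientation, a condensed sketch of the onlining path after this change.
This is illustrative only: online_pages_sketch() is a hypothetical stand-in
for the relevant part of online_pages(), locking and error handling are
omitted, and the snippet is not compilable outside the kernel tree. All other
identifiers come from the diff below.

static void online_pages_sketch(struct zone *zone)
{
	bool need_zonelists_rebuild = false;

	if (!populated_zone(zone)) {
		need_zonelists_rebuild = true;
		/* allocate the zone's per-cpu pagesets directly */
		setup_zone_pageset(zone);
	}

	/* ... pages are onlined here ... */

	if (need_zonelists_rebuild)
		/* rebuild zonelists only if the zone was previously empty */
		build_all_zonelists(NULL);
	else
		zone_pcp_update(zone);
}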

Link: http://lkml.kernel.org/r/20170721143915.14161-5-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d9c9a0b9
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -771,7 +771,7 @@ static inline bool is_dev_zone(const struct zone *zone)
 #include <linux/memory_hotplug.h>
 extern struct mutex zonelists_mutex;
-void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+void build_all_zonelists(pg_data_t *pgdat);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		int classzone_idx, unsigned int alloc_flags,

--- a/init/main.c
+++ b/init/main.c
@@ -542,7 +542,7 @@ asmlinkage __visible void __init start_kernel(void)
 	boot_cpu_state_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
-	build_all_zonelists(NULL, NULL);
+	build_all_zonelists(NULL);
 	page_alloc_init();
 	pr_notice("Kernel command line: %s\n", boot_command_line);

--- a/mm/internal.h
+++ b/mm/internal.h
@@ -525,4 +525,5 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
 }
+void setup_zone_pageset(struct zone *zone);
 #endif	/* __MM_INTERNAL_H */

--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -929,7 +929,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	mutex_lock(&zonelists_mutex);
 	if (!populated_zone(zone)) {
 		need_zonelists_rebuild = 1;
-		build_all_zonelists(NULL, zone);
+		setup_zone_pageset(zone);
 	}
 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
@@ -950,7 +950,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	if (onlined_pages) {
 		node_states_set_node(nid, &arg);
 		if (need_zonelists_rebuild)
-			build_all_zonelists(NULL, NULL);
+			build_all_zonelists(NULL);
 		else
 			zone_pcp_update(zone);
 	}
@@ -1028,7 +1028,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 	 * to access not-initialized zonelist, build here.
 	 */
 	mutex_lock(&zonelists_mutex);
-	build_all_zonelists(pgdat, NULL);
+	build_all_zonelists(pgdat);
 	mutex_unlock(&zonelists_mutex);
 	/*
@@ -1084,7 +1084,7 @@ int try_online_node(int nid)
 	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
 		mutex_lock(&zonelists_mutex);
-		build_all_zonelists(NULL, NULL);
+		build_all_zonelists(NULL);
 		mutex_unlock(&zonelists_mutex);
 	}
@@ -1704,7 +1704,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	if (!populated_zone(zone)) {
 		zone_pcp_reset(zone);
 		mutex_lock(&zonelists_mutex);
-		build_all_zonelists(NULL, NULL);
+		build_all_zonelists(NULL);
 		mutex_unlock(&zonelists_mutex);
 	} else
 		zone_pcp_update(zone);

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5129,7 +5129,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
-static void setup_zone_pageset(struct zone *zone);
 /*
  * Global mutex to protect against size modification of zonelists
@@ -5209,20 +5208,14 @@ build_all_zonelists_init(void)
  * Called with zonelists_mutex held always
  * unless system_state == SYSTEM_BOOTING.
  *
- * __ref due to (1) call of __meminit annotated setup_zone_pageset
- * [we're only called with non-NULL zone through __meminit paths] and
- * (2) call of __init annotated helper build_all_zonelists_init
+ * __ref due to call of __init annotated helper build_all_zonelists_init
  * [protected by SYSTEM_BOOTING].
  */
-void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
+void __ref build_all_zonelists(pg_data_t *pgdat)
 {
 	if (system_state == SYSTEM_BOOTING) {
 		build_all_zonelists_init();
 	} else {
-#ifdef CONFIG_MEMORY_HOTPLUG
-		if (zone)
-			setup_zone_pageset(zone);
-#endif
 		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
 		stop_machine_cpuslocked(__build_all_zonelists, pgdat, NULL);
@@ -5496,7 +5489,7 @@ static void __meminit zone_pageset_init(struct zone *zone, int cpu)
 	pageset_set_high_and_batch(zone, pcp);
 }
-static void __meminit setup_zone_pageset(struct zone *zone)
+void __meminit setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
 	zone->pageset = alloc_percpu(struct per_cpu_pageset);