Commit 854e8848 authored by Mike Rapoport, committed by Linus Torvalds

mm: clean up free_area_init_node() and its helpers

free_area_init_node() now always uses memblock info and the zone PFN
limits, so it does not need the backwards-compatibility functions to
calculate the zone spanned and absent pages.  The removal of the
compat_ versions of zone_{absent,spanned}_pages_in_node(), in turn,
makes the zone_size and zhole_size parameters unused.

The node_start_pfn is determined by get_pfn_range_for_nid(), so there is
no need to pass it to free_area_init_node().

As a result, the only required parameter to free_area_init_node() is
the node ID; all the rest are removed along with the no-longer-used
compat_zone_{absent,spanned}_pages_in_node() helpers.
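
To illustrate the simplification, a condensed before/after sketch of the
call made from free_area_init() (taken from the diff below; not a
complete listing):

	/* Before: zone sizes, holes and the start PFN were passed in. */
	__free_area_init_node(nid, NULL,
			      find_min_pfn_for_node(nid), NULL, false);

	/* After: the node ID is enough; the PFN range now comes from
	 * get_pfn_range_for_nid(), i.e. from memblock. */
	free_area_init_node(nid);
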
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Hoan Tran <hoan@os.amperecomputing.com>	[arm64]
Cc: Baoquan He <bhe@redhat.com>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200412194859.12663-20-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bc9331a1
@@ -6436,8 +6436,7 @@ static unsigned long __init zone_spanned_pages_in_node(int nid,
 					unsigned long node_start_pfn,
 					unsigned long node_end_pfn,
 					unsigned long *zone_start_pfn,
-					unsigned long *zone_end_pfn,
-					unsigned long *ignored)
+					unsigned long *zone_end_pfn)
 {
 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
@@ -6501,8 +6500,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 static unsigned long __init zone_absent_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long node_start_pfn,
-					unsigned long node_end_pfn,
-					unsigned long *ignored)
+					unsigned long node_end_pfn)
 {
 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
@@ -6549,43 +6547,9 @@ static unsigned long __init zone_absent_pages_in_node(int nid,
 	return nr_absent;
 }
 
-static inline unsigned long __init compat_zone_spanned_pages_in_node(int nid,
-						unsigned long zone_type,
-						unsigned long node_start_pfn,
-						unsigned long node_end_pfn,
-						unsigned long *zone_start_pfn,
-						unsigned long *zone_end_pfn,
-						unsigned long *zones_size)
-{
-	unsigned int zone;
-
-	*zone_start_pfn = node_start_pfn;
-	for (zone = 0; zone < zone_type; zone++)
-		*zone_start_pfn += zones_size[zone];
-
-	*zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
-
-	return zones_size[zone_type];
-}
-
-static inline unsigned long __init compat_zone_absent_pages_in_node(int nid,
-						unsigned long zone_type,
-						unsigned long node_start_pfn,
-						unsigned long node_end_pfn,
-						unsigned long *zholes_size)
-{
-	if (!zholes_size)
-		return 0;
-
-	return zholes_size[zone_type];
-}
-
 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
 						unsigned long node_start_pfn,
-						unsigned long node_end_pfn,
-						unsigned long *zones_size,
-						unsigned long *zholes_size,
-						bool compat)
+						unsigned long node_end_pfn)
 {
 	unsigned long realtotalpages = 0, totalpages = 0;
 	enum zone_type i;
@@ -6596,31 +6560,14 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long spanned, absent;
 		unsigned long size, real_size;
 
-		if (compat) {
-			spanned = compat_zone_spanned_pages_in_node(
-						pgdat->node_id, i,
-						node_start_pfn,
-						node_end_pfn,
-						&zone_start_pfn,
-						&zone_end_pfn,
-						zones_size);
-			absent = compat_zone_absent_pages_in_node(
-						pgdat->node_id, i,
-						node_start_pfn,
-						node_end_pfn,
-						zholes_size);
-		} else {
-			spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
-						node_start_pfn,
-						node_end_pfn,
-						&zone_start_pfn,
-						&zone_end_pfn,
-						zones_size);
-			absent = zone_absent_pages_in_node(pgdat->node_id, i,
-						node_start_pfn,
-						node_end_pfn,
-						zholes_size);
-		}
+		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
+						     node_start_pfn,
+						     node_end_pfn,
+						     &zone_start_pfn,
+						     &zone_end_pfn);
+		absent = zone_absent_pages_in_node(pgdat->node_id, i,
+						   node_start_pfn,
+						   node_end_pfn);
 
 		size = spanned;
 		real_size = size - absent;
@@ -6942,10 +6889,7 @@ static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
 #endif
 
-static void __init __free_area_init_node(int nid, unsigned long *zones_size,
-					 unsigned long node_start_pfn,
-					 unsigned long *zholes_size,
-					 bool compat)
+static void __init free_area_init_node(int nid)
 {
 	pg_data_t *pgdat = NODE_DATA(nid);
 	unsigned long start_pfn = 0;
@@ -6954,19 +6898,16 @@ static void __init __free_area_init_node(int nid, unsigned long *zones_size,
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
+	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
 	pgdat->node_id = nid;
-	pgdat->node_start_pfn = node_start_pfn;
+	pgdat->node_start_pfn = start_pfn;
 	pgdat->per_cpu_nodestats = NULL;
-	if (!compat) {
-		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
-			(u64)start_pfn << PAGE_SHIFT,
-			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
-	} else {
-		start_pfn = node_start_pfn;
-	}
-	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
-				  zones_size, zholes_size, compat);
+
+	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
+		(u64)start_pfn << PAGE_SHIFT,
+		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
+	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
 
 	alloc_node_mem_map(pgdat);
 	pgdat_set_deferred_range(pgdat);
@@ -6976,7 +6917,7 @@ static void __init __free_area_init_node(int nid, unsigned long *zones_size,
 
 void __init free_area_init_memoryless_node(int nid)
 {
-	__free_area_init_node(nid, NULL, 0, NULL, false);
+	free_area_init_node(nid);
 }
 
 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
@@ -7506,8 +7447,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
 	init_unavailable_mem();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
-		__free_area_init_node(nid, NULL,
-				      find_min_pfn_for_node(nid), NULL, false);
+		free_area_init_node(nid);
 
 		/* Any memory on that node */
 		if (pgdat->node_present_pages)