Commit e506b996 authored by Xishi Qiu, committed by Linus Torvalds

mem-hotplug: fix node spanned pages when we have a movable node

Commit 342332e6 ("mm/page_alloc.c: introduce kernelcore=mirror
option") rewrote the calculation of node spanned pages.  But when we
have a movable node, the node's spanned pages are counted twice: such
a node ends up with an empty ZONE_NORMAL whose present pages are zero
but whose spanned pages are not.

e.g.
    Zone ranges:
      DMA      [mem 0x0000000000001000-0x0000000000ffffff]
      DMA32    [mem 0x0000000001000000-0x00000000ffffffff]
      Normal   [mem 0x0000000100000000-0x0000007c7fffffff]
    Movable zone start for each node
      Node 1: 0x0000001080000000
      Node 2: 0x0000002080000000
      Node 3: 0x0000003080000000
      Node 4: 0x0000003c80000000
      Node 5: 0x0000004c80000000
      Node 6: 0x0000005c80000000
    Early memory node ranges
      node   0: [mem 0x0000000000001000-0x000000000009ffff]
      node   0: [mem 0x0000000000100000-0x000000007552afff]
      node   0: [mem 0x000000007bd46000-0x000000007bd46fff]
      node   0: [mem 0x000000007bdcd000-0x000000007bffffff]
      node   0: [mem 0x0000000100000000-0x000000107fffffff]
      node   1: [mem 0x0000001080000000-0x000000207fffffff]
      node   2: [mem 0x0000002080000000-0x000000307fffffff]
      node   3: [mem 0x0000003080000000-0x0000003c7fffffff]
      node   4: [mem 0x0000003c80000000-0x0000004c7fffffff]
      node   5: [mem 0x0000004c80000000-0x0000005c7fffffff]
      node   6: [mem 0x0000005c80000000-0x0000006c7fffffff]
      node   7: [mem 0x0000006c80000000-0x0000007c7fffffff]

  node1:
    Normal, start=0x1080000, present=0x0, spanned=0x1000000
    Movable, start=0x1080000, present=0x1000000, spanned=0x1000000
    pgdat, start=0x1080000, present=0x1000000, spanned=0x2000000
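
Spelled out with the numbers above: both zones span the same pfn
range, and the pgdat sums them, so

    pgdat spanned = Normal spanned + Movable spanned
                  = 0x1000000 + 0x1000000
                  = 0x2000000 pfns,

twice the 0x1000000 pfns node 1 actually covers.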

After this patch, the problem is fixed.

  node1:
    Normal, start=0x0, present=0x0, spanned=0x0
    Movable, start=0x1080000, present=0x1000000, spanned=0x1000000
    pgdat, start=0x1080000, present=0x1000000, spanned=0x1000000

Link: http://lkml.kernel.org/r/57A325E8.6070100@huawei.com
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fdd4c614
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5004,15 +5004,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                         break;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-                /*
-                 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
-                 * from zone_movable_pfn[nid] to end of each node should be
-                 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
-                 */
-                if (!mirrored_kernelcore && zone_movable_pfn[nid])
-                        if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
-                                continue;
-
                 /*
                  * Check given memblock attribute by firmware which can affect
                  * kernel memory layout. If zone==ZONE_MOVABLE but memory is
@@ -5456,6 +5447,12 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid,
                         *zone_end_pfn = min(node_end_pfn,
                                 arch_zone_highest_possible_pfn[movable_zone]);
 
+                /* Adjust for ZONE_MOVABLE starting within this range */
+                } else if (!mirrored_kernelcore &&
+                        *zone_start_pfn < zone_movable_pfn[nid] &&
+                        *zone_end_pfn > zone_movable_pfn[nid]) {
+                        *zone_end_pfn = zone_movable_pfn[nid];
+
                 /* Check if this whole range is within ZONE_MOVABLE */
                 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
                         *zone_start_pfn = *zone_end_pfn;
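
To see what the new branch does with node 1's numbers, here is a
small, hypothetical userspace demo (not kernel code).  It replays the
new cap and then the usual min/max clipping of the zone to the node
range, as zone_spanned_pages_in_node() performs before computing the
span; the pfn values are taken from the log above:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))
    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            /* node 1 from the log: pfns [0x1080000, 0x2080000) */
            unsigned long node_start_pfn   = 0x1080000;
            unsigned long node_end_pfn     = 0x2080000;
            unsigned long zone_movable_pfn = 0x1080000; /* ZONE_MOVABLE starts at node start */

            /* arch-wide ZONE_NORMAL limits from the zone ranges above */
            unsigned long zone_start_pfn = 0x100000;
            unsigned long zone_end_pfn   = 0x7c80000;

            /* new else-if branch: cap ZONE_NORMAL at the start of ZONE_MOVABLE */
            if (zone_start_pfn < zone_movable_pfn && zone_end_pfn > zone_movable_pfn)
                    zone_end_pfn = zone_movable_pfn;

            /* zone_spanned_pages_in_node() then clips the zone to the node range */
            zone_end_pfn   = min(zone_end_pfn, node_end_pfn);
            zone_start_pfn = max(zone_start_pfn, node_start_pfn);

            /* prints 0x0: node 1 no longer gets a spuriously spanned ZONE_NORMAL */
            printf("node1 ZONE_NORMAL spanned = 0x%lx pfns\n",
                   zone_end_pfn - zone_start_pfn);
            return 0;
    }

With the cap in place the clipped zone collapses to an empty range,
which is exactly the "Normal ... spanned=0x0" line in the fixed
output.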
@@ -5559,28 +5556,23 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
          * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
          * and vice versa.
          */
-        if (zone_movable_pfn[nid]) {
-                if (mirrored_kernelcore) {
-                        unsigned long start_pfn, end_pfn;
-                        struct memblock_region *r;
-
-                        for_each_memblock(memory, r) {
-                                start_pfn = clamp(memblock_region_memory_base_pfn(r),
-                                                  zone_start_pfn, zone_end_pfn);
-                                end_pfn = clamp(memblock_region_memory_end_pfn(r),
-                                                zone_start_pfn, zone_end_pfn);
-
-                                if (zone_type == ZONE_MOVABLE &&
-                                    memblock_is_mirror(r))
-                                        nr_absent += end_pfn - start_pfn;
-
-                                if (zone_type == ZONE_NORMAL &&
-                                    !memblock_is_mirror(r))
-                                        nr_absent += end_pfn - start_pfn;
-                        }
-                } else {
-                        if (zone_type == ZONE_NORMAL)
-                                nr_absent += node_end_pfn - zone_movable_pfn[nid];
-                }
+        if (mirrored_kernelcore && zone_movable_pfn[nid]) {
+                unsigned long start_pfn, end_pfn;
+                struct memblock_region *r;
+
+                for_each_memblock(memory, r) {
+                        start_pfn = clamp(memblock_region_memory_base_pfn(r),
+                                          zone_start_pfn, zone_end_pfn);
+                        end_pfn = clamp(memblock_region_memory_end_pfn(r),
+                                        zone_start_pfn, zone_end_pfn);
+
+                        if (zone_type == ZONE_MOVABLE &&
+                            memblock_is_mirror(r))
+                                nr_absent += end_pfn - start_pfn;
+
+                        if (zone_type == ZONE_NORMAL &&
+                            !memblock_is_mirror(r))
+                                nr_absent += end_pfn - start_pfn;
+                }
         }
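
The dropped "else" compensation in zone_absent_pages_in_node()
becomes unnecessary for the same reason.  Assuming node 1 has no
holes (its early memory range above is one contiguous block), the
accounting for its ZONE_NORMAL works out as:

    before: spanned = 0x1000000
            absent += node_end_pfn - zone_movable_pfn[nid]
                    = 0x2080000 - 0x1080000 = 0x1000000
            present = spanned - absent = 0x0

    after:  spanned = 0x0, absent = 0x0, present = 0x0

Present pages are unchanged, but the bogus 0x1000000 spanned pages
disappear from both the zone and the pgdat.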