Commit 2a1e274a authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds

Create the ZONE_MOVABLE zone

The following 8 patches against 2.6.20-mm2 create a zone called ZONE_MOVABLE
that is only usable by allocations that specify both __GFP_HIGHMEM and
__GFP_MOVABLE.  This has the effect of keeping all non-movable pages within a
single memory partition while allowing movable allocations to be satisfied
from either partition.  The patches may be applied with the list-based
anti-fragmentation patches that group pages together based on mobility.

The size of the zone is determined by a kernelcore= parameter specified at
boot-time.  This specifies how much memory is usable by non-movable
allocations and the remainder is used for ZONE_MOVABLE.  Any range of pages
within ZONE_MOVABLE can be released by migrating the pages or by reclaiming.

When selecting a zone to take pages from for ZONE_MOVABLE, there are two
things to consider.  First, only memory from the highest populated zone is
used for ZONE_MOVABLE.  On the x86, this is probably going to be ZONE_HIGHMEM
but it would be ZONE_DMA on ppc64 or possibly ZONE_DMA32 on x86_64.  Second,
the amount of memory usable by the kernel will be spread evenly throughout
NUMA nodes where possible.  If the nodes are not of equal size, the amount of
memory usable by the kernel on some nodes may be greater than others.

By default, the zone is not as useful for hugetlb allocations because they are
pinned and non-migratable (currently at least).  A sysctl is provided that
allows huge pages to be allocated from that zone.  This means that the huge
page pool can be resized to the size of ZONE_MOVABLE during the lifetime of
the system assuming that pages are not mlocked.  Despite huge pages being
non-movable, we do not introduce additional external fragmentation of note as
huge pages are always the largest contiguous block we care about.

Credit goes to Andy Whitcroft for catching a large variety of problems during
review of the patches.

This patch creates an additional zone, ZONE_MOVABLE.  This zone is only usable
by allocations which specify both __GFP_HIGHMEM and __GFP_MOVABLE.  Hot-added
memory continues to be placed in its existing destination as there is no
mechanism to redirect it to a specific zone.

[y-goto@jp.fujitsu.com: Fix section mismatch of memory hotplug related code]
[akpm@linux-foundation.org: various fixes]
Signed-off-by: default avatarMel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: default avatarYasunori Goto <y-goto@jp.fujitsu.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 769848c0
...@@ -106,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags) ...@@ -106,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
if (flags & __GFP_DMA32) if (flags & __GFP_DMA32)
return ZONE_DMA32; return ZONE_DMA32;
#endif #endif
if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
(__GFP_HIGHMEM | __GFP_MOVABLE))
return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
if (flags & __GFP_HIGHMEM) if (flags & __GFP_HIGHMEM)
return ZONE_HIGHMEM; return ZONE_HIGHMEM;
......
...@@ -1005,6 +1005,7 @@ extern unsigned long find_max_pfn_with_active_regions(void); ...@@ -1005,6 +1005,7 @@ extern unsigned long find_max_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid, extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn); unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid); extern void sparse_memory_present_with_active_regions(int nid);
extern int cmdline_parse_kernelcore(char *p);
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int early_pfn_to_nid(unsigned long pfn); extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
......
...@@ -146,6 +146,7 @@ enum zone_type { ...@@ -146,6 +146,7 @@ enum zone_type {
*/ */
ZONE_HIGHMEM, ZONE_HIGHMEM,
#endif #endif
ZONE_MOVABLE,
MAX_NR_ZONES MAX_NR_ZONES
}; };
...@@ -167,6 +168,7 @@ enum zone_type { ...@@ -167,6 +168,7 @@ enum zone_type {
+ defined(CONFIG_ZONE_DMA32) \ + defined(CONFIG_ZONE_DMA32) \
+ 1 \ + 1 \
+ defined(CONFIG_HIGHMEM) \ + defined(CONFIG_HIGHMEM) \
+ 1 \
) )
#if __ZONE_COUNT < 2 #if __ZONE_COUNT < 2
#define ZONES_SHIFT 0 #define ZONES_SHIFT 0
...@@ -499,10 +501,22 @@ static inline int populated_zone(struct zone *zone) ...@@ -499,10 +501,22 @@ static inline int populated_zone(struct zone *zone)
return (!!zone->present_pages); return (!!zone->present_pages);
} }
extern int movable_zone;
/*
 * Report whether ZONE_MOVABLE should be accounted as highmem.
 *
 * movable_zone records which zone ZONE_MOVABLE's pages were carved out of
 * (per the commit description, the highest populated zone on the system —
 * typically ZONE_HIGHMEM on x86 with highmem configured).  When that donor
 * zone is ZONE_HIGHMEM, movable pages must be treated as highmem by
 * is_highmem()/is_highmem_idx() and the free-highpage accounting.
 *
 * Returns 1 if ZONE_MOVABLE consists of highmem pages, 0 otherwise.
 */
static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
return movable_zone == ZONE_HIGHMEM;
#else
/*
 * Without highmem, or without the arch-supplied active-region map that
 * sizes ZONE_MOVABLE, the movable zone can never hold highmem pages.
 */
return 0;
#endif
}
static inline int is_highmem_idx(enum zone_type idx) static inline int is_highmem_idx(enum zone_type idx)
{ {
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM); return (idx == ZONE_HIGHMEM ||
(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else #else
return 0; return 0;
#endif #endif
...@@ -522,7 +536,9 @@ static inline int is_normal_idx(enum zone_type idx) ...@@ -522,7 +536,9 @@ static inline int is_normal_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone) static inline int is_highmem(struct zone *zone)
{ {
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM; int zone_idx = zone - zone->zone_pgdat->node_zones;
return zone_idx == ZONE_HIGHMEM ||
(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else #else
return 0; return 0;
#endif #endif
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#define HIGHMEM_ZONE(xx) #define HIGHMEM_ZONE(xx)
#endif #endif
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC), FOR_ALL_ZONES(PGALLOC),
...@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node, ...@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node,
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
zone_page_state(&zones[ZONE_HIGHMEM], item) + zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif #endif
zone_page_state(&zones[ZONE_NORMAL], item); zone_page_state(&zones[ZONE_NORMAL], item) +
zone_page_state(&zones[ZONE_MOVABLE], item);
} }
extern void zone_statistics(struct zonelist *, struct zone *); extern void zone_statistics(struct zonelist *, struct zone *);
......
...@@ -46,9 +46,14 @@ unsigned int nr_free_highpages (void) ...@@ -46,9 +46,14 @@ unsigned int nr_free_highpages (void)
pg_data_t *pgdat; pg_data_t *pgdat;
unsigned int pages = 0; unsigned int pages = 0;
for_each_online_pgdat(pgdat) for_each_online_pgdat(pgdat) {
pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
NR_FREE_PAGES); NR_FREE_PAGES);
if (zone_movable_is_highmem())
pages += zone_page_state(
&pgdat->node_zones[ZONE_MOVABLE],
NR_FREE_PAGES);
}
return pages; return pages;
} }
......
This diff is collapsed.
...@@ -472,7 +472,7 @@ const struct seq_operations fragmentation_op = { ...@@ -472,7 +472,7 @@ const struct seq_operations fragmentation_op = {
#endif #endif
#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
TEXT_FOR_HIGHMEM(xx) TEXT_FOR_HIGHMEM(xx) xx "_movable",
static const char * const vmstat_text[] = { static const char * const vmstat_text[] = {
/* Zoned VM counters */ /* Zoned VM counters */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment