Commit 18ea7e71 authored by Mel Gorman, committed by Linus Torvalds

mm: remember what the preferred zone is for zone_statistics

On NUMA, zone_statistics() is used to record events like numa hit, miss and
foreign.  It assumes that the first zone in a zonelist is the preferred zone.
When multiple zonelists are replaced by one that is filtered, this is no
longer the case.

This patch records what the preferred zone is rather than assuming the first
zone in the zonelist is it.  This simplifies the reading of later patches in
this set.
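For illustration, the accounting rule the patch preserves can be sketched in plain userspace C. This is a minimal sketch, not kernel code: the fake_zone type, its fields and the fake_zone_statistics() helper are hypothetical stand-ins, and it compares node ids where the real zone_statistics() compares zone_pgdat pointers. An allocation counts as NUMA_HIT when the zone it was served from is on the same node as the preferred zone; otherwise it counts as NUMA_MISS on the serving zone and NUMA_FOREIGN on the preferred zone, and additionally as NUMA_LOCAL when the serving zone is on the node the caller is running on.

	/* Userspace sketch of the NUMA allocation accounting; all names here
	 * are hypothetical stand-ins for the kernel's struct zone and
	 * zone_statistics(). */
	#include <stdio.h>

	enum { NUMA_HIT, NUMA_MISS, NUMA_FOREIGN, NUMA_LOCAL, NR_EVENTS };

	struct fake_zone {
		int node;			/* node this zone belongs to */
		unsigned long stats[NR_EVENTS];	/* per-zone event counters */
	};

	/* Mirrors the post-patch calling convention: the caller passes the
	 * preferred zone explicitly instead of a zonelist. */
	static void fake_zone_statistics(struct fake_zone *preferred_zone,
					 struct fake_zone *z, int running_node)
	{
		if (z->node == preferred_zone->node) {
			z->stats[NUMA_HIT]++;
		} else {
			z->stats[NUMA_MISS]++;
			preferred_zone->stats[NUMA_FOREIGN]++;
		}
		if (z->node == running_node)
			z->stats[NUMA_LOCAL]++;
	}

	int main(void)
	{
		struct fake_zone node0 = { .node = 0 };
		struct fake_zone node1 = { .node = 1 };

		/* Preferred node 0, allocation satisfied from node 1 while
		 * running on node 0: MISS on node 1, FOREIGN on node 0. */
		fake_zone_statistics(&node0, &node1, 0);

		printf("node1: miss=%lu, node0: foreign=%lu\n",
		       node1.stats[NUMA_MISS], node0.stats[NUMA_FOREIGN]);
		return 0;
	}

Passing the preferred zone to zone_statistics() explicitly keeps this classification correct once the preferred zone can no longer be assumed to be the first entry of the zonelist.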
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0e88460d
@@ -174,7 +174,7 @@ static inline unsigned long node_page_state(int node,
 		zone_page_state(&zones[ZONE_MOVABLE], item);
 }
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
 #else
@@ -1050,7 +1050,7 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zonelist *zonelist,
+static struct page *buffered_rmqueue(struct zone *preferred_zone,
 			struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
@@ -1102,7 +1102,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	}
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
-	zone_statistics(zonelist, zone);
+	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
 	put_cpu();
@@ -1383,7 +1383,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 	struct zone **z;
 	struct page *page = NULL;
 	int classzone_idx = zone_idx(zonelist->zones[0]);
-	struct zone *zone;
+	struct zone *zone, *preferred_zone;
 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
 	int zlc_active = 0;	/* set if using zonelist_cache */
 	int did_zlc_setup = 0;	/* just call zlc_setup() one time */
@@ -1395,6 +1395,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
 	z = zonelist->zones;
+	preferred_zone = *z;
 	do {
 		/*
@@ -1433,7 +1434,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 			}
 		}
-		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
+		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
 		if (page)
 			break;
 this_zone_full:
@@ -364,13 +364,13 @@ void refresh_cpu_vm_stats(int cpu)
  *
  * Must be called with interrupts disabled.
  */
-void zone_statistics(struct zonelist *zonelist, struct zone *z)
+void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
-	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
 		__inc_zone_state(z, NUMA_HIT);
 	} else {
 		__inc_zone_state(z, NUMA_MISS);
-		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 	}
 	if (z->node == numa_node_id())
 		__inc_zone_state(z, NUMA_LOCAL);