Commit 060e7417 authored by Mel Gorman, committed by Linus Torvalds

mm, page_alloc: inline zone_statistics

zone_statistics has one call-site but it's a public function.  Make it
static and inline.
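
As a general illustration of the pattern (a minimal userspace sketch with made-up names, not the kernel code changed below): a helper with a single caller that is exported through a header always costs a call through a public symbol, while defining it next to its caller as static inline lets the compiler fold the body into the call site and drop the symbol entirely.

  /* sketch.c - hypothetical example of the static-inline pattern */
  #include <stdio.h>

  struct stats {
  	unsigned long local;
  	unsigned long remote;
  };

  #define STAT_REMOTE 0x1	/* hypothetical flag */

  /*
   * Only one call site, so the helper lives next to its caller:
   * "static" keeps it out of the global symbol table and "inline"
   * lets the compiler merge the body into the caller, removing the
   * call/return overhead an extern function would keep.
   */
  static inline void update_stats(struct stats *s, int flags)
  {
  	if (flags & STAT_REMOTE)
  		s->remote++;
  	else
  		s->local++;
  }

  int main(void)
  {
  	struct stats s = { 0, 0 };

  	update_stats(&s, 0);
  	update_stats(&s, STAT_REMOTE);
  	printf("local=%lu remote=%lu\n", s.local, s.remote);
  	return 0;
  }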

The performance difference on a page allocator microbenchmark is as follows (lower is better; the bracketed percentage is the improvement relative to the 4.6.0-rc2 baseline, e.g. (419 - 412) / 419 ~ 1.67% for alloc-odr0-1):

                                             4.6.0-rc2                  4.6.0-rc2
                                      statbranch-v1r20           statinline-v1r20
  Min      alloc-odr0-1               419.00 (  0.00%)           412.00 (  1.67%)
  Min      alloc-odr0-2               305.00 (  0.00%)           301.00 (  1.31%)
  Min      alloc-odr0-4               250.00 (  0.00%)           247.00 (  1.20%)
  Min      alloc-odr0-8               219.00 (  0.00%)           215.00 (  1.83%)
  Min      alloc-odr0-16              203.00 (  0.00%)           199.00 (  1.97%)
  Min      alloc-odr0-32              195.00 (  0.00%)           191.00 (  2.05%)
  Min      alloc-odr0-64              191.00 (  0.00%)           187.00 (  2.09%)
  Min      alloc-odr0-128             189.00 (  0.00%)           185.00 (  2.12%)
  Min      alloc-odr0-256             198.00 (  0.00%)           193.00 (  2.53%)
  Min      alloc-odr0-512             210.00 (  0.00%)           207.00 (  1.43%)
  Min      alloc-odr0-1024            216.00 (  0.00%)           213.00 (  1.39%)
  Min      alloc-odr0-2048            221.00 (  0.00%)           220.00 (  0.45%)
  Min      alloc-odr0-4096            227.00 (  0.00%)           226.00 (  0.44%)
  Min      alloc-odr0-8192            232.00 (  0.00%)           229.00 (  1.29%)
  Min      alloc-odr0-16384           232.00 (  0.00%)           229.00 (  1.29%)
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b9f00e14
@@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 #ifdef CONFIG_NUMA
 
 extern unsigned long node_page_state(int node, enum zone_stat_item item);
-extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 
 #else
 
 #define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl, _z, gfp) do { } while (0)
 
 #endif /* CONFIG_NUMA */
 
...
@@ -2354,6 +2354,37 @@ int split_free_page(struct page *page)
 	return nr_pages;
 }
 
+/*
+ * Update NUMA hit/miss statistics
+ *
+ * Must be called with interrupts disabled.
+ *
+ * When __GFP_OTHER_NODE is set assume the node of the preferred
+ * zone is the local node. This is useful for daemons who allocate
+ * memory on behalf of other processes.
+ */
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+								gfp_t flags)
+{
+#ifdef CONFIG_NUMA
+	int local_nid = numa_node_id();
+	enum zone_stat_item local_stat = NUMA_LOCAL;
+
+	if (unlikely(flags & __GFP_OTHER_NODE)) {
+		local_stat = NUMA_OTHER;
+		local_nid = preferred_zone->node;
+	}
+
+	if (z->node == local_nid) {
+		__inc_zone_state(z, NUMA_HIT);
+		__inc_zone_state(z, local_stat);
+	} else {
+		__inc_zone_state(z, NUMA_MISS);
+		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
+	}
+#endif
+}
+
 /*
  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  */
...
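
A note on the !NUMA case: because the #ifdef now sits inside the static inline body, a !CONFIG_NUMA build sees an empty inline function that the compiler eliminates at the call site, which is what the removed do { } while (0) stub macro used to provide. A minimal hypothetical sketch of that pattern (made-up names, not the kernel sources):

  /* sketch: config-dependent body inside a static inline */
  #define MY_CONFIG_NUMA 1	/* comment out to emulate a !NUMA build */

  struct counters {
  	unsigned long hit;
  };

  /*
   * Callers always see one definition; with the option off the body
   * is empty and the inlined call compiles away, so no separate
   * "do { } while (0)" stub macro is needed in a header.
   */
  static inline void account_hit(struct counters *c)
  {
  #ifdef MY_CONFIG_NUMA
  	c->hit++;
  #endif
  }

  int main(void)
  {
  	struct counters c = { 0 };

  	account_hit(&c);
  	return (int)c.hit;
  }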
@@ -569,35 +569,6 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 #endif
 
 #ifdef CONFIG_NUMA
-/*
- * zonelist = the list of zones passed to the allocator
- * z = the zone from which the allocation occurred.
- *
- * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
- */
-void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
-{
-	int local_nid = numa_node_id();
-	enum zone_stat_item local_stat = NUMA_LOCAL;
-
-	if (unlikely(flags & __GFP_OTHER_NODE)) {
-		local_stat = NUMA_OTHER;
-		local_nid = preferred_zone->node;
-	}
-
-	if (z->node == local_nid) {
-		__inc_zone_state(z, NUMA_HIT);
-		__inc_zone_state(z, local_stat);
-	} else {
-		__inc_zone_state(z, NUMA_MISS);
-		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
-	}
-}
-
 /*
  * Determine the per node value of a stat item.
  */
...