Commit 12938a92 authored by Christoph Lameter, committed by Tejun Heo

vmstat: Optimize zone counter modifications through the use of this cpu operations

this_cpu operations can be used to slightly optimize these functions. The
changes avoid some explicit address calculations and replace them with
accesses through the percpu segment register.
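
For illustration, a minimal sketch distilled from the patch below (assuming
x86-64, where the percpu base lives in the %gs segment register):

	/* Before: compute this CPU's address explicitly, then dereference */
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;
	x = delta + *p;

	/* After: __this_cpu_read() folds the address calculation into a
	 * single segment-prefixed memory access
	 */
	s8 __percpu *p = zone->pageset->vm_stat_diff + item;
	x = delta + __this_cpu_read(*p);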

With this_cpu_inc_return and this_cpu_dec_return it would be possible to
optimize inc_zone_page_state and dec_zone_page_state even further; see the
sketch below.
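
Neither helper exists yet; assuming a __this_cpu_inc_return() with the
obvious semantics, __inc_zone_state (which inc_zone_page_state wraps) could
fuse the increment and the subsequent read into one percpu operation:

	void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
	{
		struct per_cpu_pageset __percpu *pcp = zone->pageset;
		s8 __percpu *p = pcp->vm_stat_diff + item;
		s8 v, t;

		/* hypothetical: increment and fetch in a single percpu op */
		v = __this_cpu_inc_return(*p);
		t = __this_cpu_read(pcp->stat_threshold);
		if (unlikely(v > t)) {
			s8 overstep = t >> 1;

			zone_page_state_add(v + overstep, zone, item);
			__this_cpu_write(*p, -overstep);
		}
	}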

V1->V2:
	- Fix __dec_zone_state overflow handling
	- Use s8 variables for temporary storage.

V2->V3:
	- Put __percpu annotations in the correct places (see the sketch below).
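
The rule: __percpu stays attached to any pointer that still holds a percpu
address, so sparse can check the accessors. A two-line sketch taken from the
patch below:

	struct per_cpu_pageset __percpu *pcp = zone->pageset;	/* percpu pointer */
	s8 __percpu *p = pcp->vm_stat_diff + item;		/* still a percpu address */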
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 819a72af
mm/vmstat.c
@@ -167,18 +167,20 @@ static void refresh_zone_stat_thresholds(void)
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-	s8 *p = pcp->vm_stat_diff + item;
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
 	long x;
+	long t;
+
+	x = delta + __this_cpu_read(*p);
 
-	x = delta + *p;
+	t = __this_cpu_read(pcp->stat_threshold);
 
-	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
+	if (unlikely(x > t || x < -t)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
-	*p = x;
+	__this_cpu_write(*p, x);
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
@@ -221,16 +223,19 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-	s8 *p = pcp->vm_stat_diff + item;
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s8 v, t;
 
-	(*p)++;
+	__this_cpu_inc(*p);
 
-	if (unlikely(*p > pcp->stat_threshold)) {
-		int overstep = pcp->stat_threshold / 2;
+	v = __this_cpu_read(*p);
+	t = __this_cpu_read(pcp->stat_threshold);
+	if (unlikely(v > t)) {
+		s8 overstep = t >> 1;
 
-		zone_page_state_add(*p + overstep, zone, item);
-		*p = -overstep;
+		zone_page_state_add(v + overstep, zone, item);
+		__this_cpu_write(*p, -overstep);
 	}
 }
@@ -242,16 +247,19 @@ EXPORT_SYMBOL(__inc_zone_page_state);
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-	s8 *p = pcp->vm_stat_diff + item;
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s8 v, t;
 
-	(*p)--;
+	__this_cpu_dec(*p);
 
-	if (unlikely(*p < - pcp->stat_threshold)) {
-		int overstep = pcp->stat_threshold / 2;
+	v = __this_cpu_read(*p);
+	t = __this_cpu_read(pcp->stat_threshold);
+	if (unlikely(v < - t)) {
+		s8 overstep = t >> 1;
 
-		zone_page_state_add(*p - overstep, zone, item);
-		*p = overstep;
+		zone_page_state_add(v - overstep, zone, item);
+		__this_cpu_write(*p, overstep);
 	}
 }