Commit e2bab3d9 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] percpu_counter_sum()

Implement percpu_counter_sum().  This is a more accurate but slower version of
percpu_counter_read_positive().

We need this for Alex's speedup-ext3_statfs patch and for the nr_file
accounting fix.  Otherwise these things would be too inaccurate on large CPU
counts.

Cc: Ravikiran G Thirumalai <kiran@scalex86.org>
Cc: Alex Tomas <alex@clusterfs.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b884e257
...@@ -39,6 +39,7 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc) ...@@ -39,6 +39,7 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
} }
void percpu_counter_mod(struct percpu_counter *fbc, long amount); void percpu_counter_mod(struct percpu_counter *fbc, long amount);
long percpu_counter_sum(struct percpu_counter *fbc);
static inline long percpu_counter_read(struct percpu_counter *fbc) static inline long percpu_counter_read(struct percpu_counter *fbc)
{ {
...@@ -92,6 +93,11 @@ static inline long percpu_counter_read_positive(struct percpu_counter *fbc) ...@@ -92,6 +93,11 @@ static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
return fbc->count; return fbc->count;
} }
/*
 * UP fallback: there are no per-cpu deltas to fold in, so the
 * "accurate" sum is simply the clamped-to-zero counter value.
 */
static inline long percpu_counter_sum(struct percpu_counter *fbc)
{
	long val = percpu_counter_read_positive(fbc);

	return val;
}
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc) static inline void percpu_counter_inc(struct percpu_counter *fbc)
......
...@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount) ...@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
if (count >= FBC_BATCH || count <= -FBC_BATCH) { if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock); spin_lock(&fbc->lock);
fbc->count += count; fbc->count += count;
*pcount = 0;
spin_unlock(&fbc->lock); spin_unlock(&fbc->lock);
count = 0; } else {
}
*pcount = count; *pcount = count;
}
put_cpu(); put_cpu();
} }
EXPORT_SYMBOL(percpu_counter_mod); EXPORT_SYMBOL(percpu_counter_mod);
/*
 * Fold every CPU's local delta into the global count and return the
 * total.  More accurate, but much more expensive, than
 * percpu_counter_read_positive(); the result is clamped at zero.
 */
long percpu_counter_sum(struct percpu_counter *fbc)
{
	long total;
	int i;

	/* Hold the lock so fbc->count and the per-cpu deltas are
	 * sampled consistently against concurrent percpu_counter_mod(). */
	spin_lock(&fbc->lock);
	total = fbc->count;
	for_each_cpu(i) {
		long *pc = per_cpu_ptr(fbc->counters, i);

		total += *pc;
	}
	spin_unlock(&fbc->lock);
	return (total < 0) ? 0 : total;
}
EXPORT_SYMBOL(percpu_counter_sum);
#endif #endif
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment