Commit 00d4d5a9 authored by Stephen Lord, committed by Stephen Lord

[XFS] Re-work pagebuf & xfs stats to use per-cpu variables

These are big globals that are written all the time.

SGI Modid: 2.5.x-xfs:slinx:159069a
parent 2c807456
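
The change applies the standard per-CPU counter pattern: every CPU updates its own private copy of the stats structure, so the hot update path never bounces a shared cache line between processors, and the rare /proc readers sum the copies of all possible CPUs on demand. Below is a minimal user-space sketch of that pattern, for illustration only; NR_CPUS, struct stats, and the thread-to-slot mapping are stand-ins for the kernel's DEFINE_PER_CPU()/get_cpu_var()/per_cpu() machinery shown in the diff.

/* Minimal user-space analogue of the per-CPU stats pattern in this commit.
 * NR_CPUS, struct stats and the thread->slot mapping are illustrative only;
 * the kernel uses DEFINE_PER_CPU()/get_cpu_var()/per_cpu() instead.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4                     /* stand-in for the kernel's NR_CPUS */

struct stats {
        uint32_t xs_write_calls;      /* hypothetical counters, like the xfsstats fields */
        uint32_t xs_read_calls;
};

/* One copy per "CPU", padded so two slots never share a cache line. */
static struct {
        struct stats s;
        char pad[64 - sizeof(struct stats)];
} per_cpu_stats[NR_CPUS];

/* Writer side: touch only this thread's own slot - no shared cache line. */
static void *worker(void *arg)
{
        int cpu = (int)(intptr_t)arg; /* pretend thread == cpu */
        for (int i = 0; i < 1000000; i++)
                per_cpu_stats[cpu].s.xs_write_calls++;
        return NULL;
}

/* Reader side: rare, so it can afford to walk and sum every slot,
 * just as xfs_read_xfsstats() sums per_cpu(xfsstats, c) over all CPUs. */
static uint64_t sum_write_calls(void)
{
        uint64_t total = 0;
        for (int c = 0; c < NR_CPUS; c++)
                total += per_cpu_stats[c].s.xs_write_calls;
        return total;
}

int main(void)
{
        pthread_t tid[NR_CPUS];

        for (int c = 0; c < NR_CPUS; c++)
                pthread_create(&tid[c], NULL, worker, (void *)(intptr_t)c);
        for (int c = 0; c < NR_CPUS; c++)
                pthread_join(tid[c], NULL);

        printf("write calls: %llu\n", (unsigned long long)sum_write_calls());
        return 0;
}

In the kernel version, get_cpu_var() also disables preemption so a task cannot migrate to another CPU in the middle of an update; readers accept a momentarily inconsistent sum, which is acceptable for statistics.
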
@@ -33,7 +33,7 @@
 #include "xfs.h"
 #include <linux/proc_fs.h>
 
-struct xfsstats xfsstats;
+DEFINE_PER_CPU(struct xfsstats, xfsstats);
 
 STATIC int
 xfs_read_xfsstats(
@@ -44,7 +44,11 @@ xfs_read_xfsstats(
 	int		*eof,
 	void		*data)
 {
-	int		i, j, len;
+	int		c, i, j, len, val;
+	__uint64_t	xs_xstrat_bytes = 0;
+	__uint64_t	xs_write_bytes = 0;
+	__uint64_t	xs_read_bytes = 0;
+
 	static struct xstats_entry {
 		char	*desc;
 		int	endpoint;
@@ -65,21 +69,32 @@ xfs_read_xfsstats(
 		{ "vnodes",		XFSSTAT_END_VNODE_OPS	},
 	};
 
+	/* Loop over all stats groups */
 	for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) {
 		len += sprintf(buffer + len, xstats[i].desc);
 		/* inner loop does each group */
 		while (j < xstats[i].endpoint) {
-			len += sprintf(buffer + len, " %u",
-				*(((__u32*)&xfsstats) + j));
+			val = 0;
+			/* sum over all cpus */
+			for (c = 0; c < NR_CPUS; c++) {
+				if (!cpu_possible(c)) continue;
+				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
+			}
+			len += sprintf(buffer + len, " %u", val);
 			j++;
 		}
 		buffer[len++] = '\n';
 	}
 	/* extra precision counters */
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i)) continue;
+		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
+		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
+		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+	}
+
 	len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
-			xfsstats.xs_xstrat_bytes,
-			xfsstats.xs_write_bytes,
-			xfsstats.xs_read_bytes);
+			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
 	len += sprintf(buffer + len, "debug %u\n",
 #if defined(XFSDEBUG)
 		1);
...
@@ -35,6 +35,8 @@
 
 #if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
 
+#include <linux/percpu.h>
+
 /*
  * XFS global statistics
  */
@@ -126,11 +128,11 @@ struct xfsstats {
 	__uint64_t		xs_read_bytes;
 };
 
-extern struct xfsstats xfsstats;
+DECLARE_PER_CPU(struct xfsstats, xfsstats);
 
-# define XFS_STATS_INC(count)		( xfsstats.count++ )
-# define XFS_STATS_DEC(count)		( xfsstats.count-- )
-# define XFS_STATS_ADD(count, inc)	( xfsstats.count += (inc) )
+# define XFS_STATS_INC(count)		( get_cpu_var(xfsstats).count++ )
+# define XFS_STATS_DEC(count)		( get_cpu_var(xfsstats).count-- )
+# define XFS_STATS_ADD(count, inc)	( get_cpu_var(xfsstats).count += (inc) )
 
 extern void xfs_init_procfs(void);
 extern void xfs_cleanup_procfs(void);
...
@@ -48,17 +48,23 @@ xfs_stats_clear_proc_handler(
 	void		*buffer,
 	size_t		*lenp)
 {
-	int		ret, *valp = ctl->data;
+	int		c, ret, *valp = ctl->data;
 	__uint32_t	vn_active;
 
 	ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing xfsstats\n");
-		/* save vn_active, it's a universal truth! */
-		vn_active = xfsstats.vn_active;
-		memset(&xfsstats, 0, sizeof(xfsstats));
-		xfsstats.vn_active = vn_active;
+		for (c = 0; c < NR_CPUS; c++) {
+			if (!cpu_possible(c)) continue;
+			preempt_disable();
+			/* save vn_active, it's a universal truth! */
+			vn_active = per_cpu(xfsstats, c).vn_active;
+			memset(&per_cpu(xfsstats, c), 0,
+			       sizeof(struct xfsstats));
+			per_cpu(xfsstats, c).vn_active = vn_active;
+			preempt_enable();
+		}
 
 		xfs_stats_clear = 0;
 	}
...
@@ -141,7 +141,7 @@ pagebuf_param_t pb_params = {
  * Pagebuf statistics variables
  */
 
-struct pbstats pbstats;
+DEFINE_PER_CPU(struct pbstats, pbstats);
 
 /*
  * Pagebuf allocation / freeing.
@@ -1842,14 +1842,18 @@ pb_stats_clear_handler(
 	void		*buffer,
 	size_t		*lenp)
 {
-	int		ret;
+	int		c, ret;
 	int		*valp = ctl->data;
 
 	ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing pbstats\n");
-		memset(&pbstats, 0, sizeof(pbstats));
+		for (c = 0; c < NR_CPUS; c++) {
+			if (!cpu_possible(c)) continue;
+			memset(&per_cpu(pbstats, c), 0,
+					sizeof(struct pbstats));
+		}
 
 		pb_params.stats_clear.val = 0;
 	}
@@ -1903,13 +1907,17 @@ pagebuf_readstats(
 	int		*eof,
 	void		*data)
 {
-	int		i, len;
+	int		c, i, len, val;
 
 	len = 0;
 	len += sprintf(buffer + len, "pagebuf");
-	for (i = 0; i < sizeof(pbstats) / sizeof(u_int32_t); i++) {
-		len += sprintf(buffer + len, " %u",
-			*(((u_int32_t*)&pbstats) + i));
+	for (i = 0; i < sizeof(struct pbstats) / sizeof(u_int32_t); i++) {
+		val = 0;
+		for (c = 0 ; c < NR_CPUS; c++) {
+			if (!cpu_possible(c)) continue;
+			val += *(((u_int32_t*)&per_cpu(pbstats, c) + i));
+		}
+		len += sprintf(buffer + len, " %u", val);
 	}
 
 	buffer[len++] = '\n';
...