Commit c9d4b9b1 authored by Ravikiran G. Thirumalai, committed by David S. Miller

[IPV4]: Convert mibstats to use kmalloc_percpu

parent a6a05f7e
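
This patch replaces the statically sized per-protocol arrays of the form struct foo_mib foo_statistics[NR_CPUS*2], indexed by 2*smp_processor_id() + !in_softirq(), with a pair of kmalloc_percpu() allocations per protocol: slot 0 holds the softirq (BH) copy and slot 1 the user-context copy, each reached through per_cpu_ptr(). The following self-contained userspace sketch illustrates that layout and the /proc-style fold across CPUs; mock_alloc_percpu(), mock_percpu_ptr(), mock_mib and MOCK_NR_CPUS are illustrative stand-ins, not kernel interfaces.

/*
 * Illustrative userspace sketch only, not kernel code.  Two blocks per MIB:
 * index 0 for softirq (BH) context, index 1 for process context, each
 * holding one private copy of the counters per "CPU".
 */
#include <stdio.h>
#include <stdlib.h>

#define MOCK_NR_CPUS 4			/* stand-in for the possible-CPU count */

struct mock_mib {
	unsigned long InReceives;
	unsigned long OutRequests;
};

/* stand-in for kmalloc_percpu(): one zeroed copy of the object per CPU */
static void *mock_alloc_percpu(size_t size)
{
	return calloc(MOCK_NR_CPUS, size);
}

/* stand-in for per_cpu_ptr(): address of CPU 'cpu's private copy */
static struct mock_mib *mock_percpu_ptr(struct mock_mib *base, int cpu)
{
	return base + cpu;
}

static struct mock_mib *mib[2];		/* [0] = BH copy, [1] = user copy */

int main(void)
{
	unsigned long total = 0;
	int cpu;

	mib[0] = mock_alloc_percpu(sizeof(struct mock_mib));
	mib[1] = mock_alloc_percpu(sizeof(struct mock_mib));
	if (!mib[0] || !mib[1])
		return 1;

	/* SNMP_INC_STATS_BH / SNMP_INC_STATS_USER analogues on "CPU 1" */
	mock_percpu_ptr(mib[0], 1)->InReceives++;
	mock_percpu_ptr(mib[1], 1)->InReceives++;

	/* /proc-style readout: fold both context copies across every CPU */
	for (cpu = 0; cpu < MOCK_NR_CPUS; cpu++) {
		total += mock_percpu_ptr(mib[0], cpu)->InReceives;
		total += mock_percpu_ptr(mib[1], cpu)->InReceives;
	}
	printf("InReceives = %lu\n", total);	/* prints 2 */

	free(mib[0]);
	free(mib[1]);
	return 0;
}

Compiled and run, this prints InReceives = 2: both context copies of every possible CPU are summed, which is what the reworked fold_field() below does for the real MIBs.
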
@@ -23,6 +23,7 @@
#include <net/sock.h>
#include <net/protocol.h>
#include <net/snmp.h>
#include <linux/ip.h>
struct icmp_err {
@@ -31,10 +32,22 @@ struct icmp_err {
};
extern struct icmp_err icmp_err_convert[];
extern struct icmp_mib icmp_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct icmp_mib, icmp_statistics);
#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field)
#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field)
#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field)
/* offt is a byte offset (from offsetof) into struct icmp_mib */
#define ICMP_INC_STATS_FIELD(offt) \
(*((unsigned long *) (((void *) \
per_cpu_ptr(icmp_statistics[!in_softirq()],\
smp_processor_id())) + offt)))++;
#define ICMP_INC_STATS_BH_FIELD(offt) \
(*((unsigned long *) (((void *) \
per_cpu_ptr(icmp_statistics[0], \
smp_processor_id())) + offt)))++;
#define ICMP_INC_STATS_USER_FIELD(offt) \
(*((unsigned long *) (((void *) \
per_cpu_ptr(icmp_statistics[1], \
smp_processor_id())) + offt)))++;
extern void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info);
extern int icmp_rcv(struct sk_buff *skb);
......
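
The new ICMP_INC_STATS*_FIELD() macros above, like the reworked fold_field() later in the patch, address a counter by its position inside the MIB structure (a byte offset from offsetof(), or a word index) rather than through a pointer into a static array. A minimal userspace sketch of that access pattern follows; demo_mib, inc_field() and read_field() are made-up names used only for illustration.

/* Illustrative userspace sketch of offset-based counter access, not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct demo_mib {
	unsigned long InEchos;
	unsigned long OutEchoReps;
};

/* bump the counter 'offt' bytes into the block, ICMP_INC_STATS_FIELD() style */
static void inc_field(struct demo_mib *m, size_t offt)
{
	(*(unsigned long *)((char *)m + offt))++;
}

/* read counter number 'nr', counted in unsigned longs, fold_field() style */
static unsigned long read_field(struct demo_mib *m, int nr)
{
	return *((unsigned long *)m + nr);
}

int main(void)
{
	struct demo_mib mib = { 0, 0 };

	inc_field(&mib, offsetof(struct demo_mib, OutEchoReps));
	printf("OutEchoReps = %lu\n", read_field(&mib, 1));	/* prints 1 */
	return 0;
}
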
@@ -149,14 +149,16 @@ struct ipv4_config
};
extern struct ipv4_config ipv4_config;
extern struct ip_mib ip_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct ip_mib, ip_statistics);
#define IP_INC_STATS(field) SNMP_INC_STATS(ip_statistics, field)
#define IP_INC_STATS_BH(field) SNMP_INC_STATS_BH(ip_statistics, field)
#define IP_INC_STATS_USER(field) SNMP_INC_STATS_USER(ip_statistics, field)
extern struct linux_mib net_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct linux_mib, net_statistics);
#define NET_INC_STATS(field) SNMP_INC_STATS(net_statistics, field)
#define NET_INC_STATS_BH(field) SNMP_INC_STATS_BH(net_statistics, field)
#define NET_INC_STATS_USER(field) SNMP_INC_STATS_USER(net_statistics, field)
#define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd)
#define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd)
extern int sysctl_local_port_range[2];
extern int sysctl_ip_default_ttl;
......
@@ -62,7 +62,7 @@ struct ip_mib
unsigned long IpFragFails;
unsigned long IpFragCreates;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct ipv6_mib
{
@@ -89,7 +89,7 @@ struct ipv6_mib
unsigned long Ip6InMcastPkts;
unsigned long Ip6OutMcastPkts;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct icmp_mib
{
@@ -121,7 +121,7 @@ struct icmp_mib
unsigned long IcmpOutAddrMaskReps;
unsigned long dummy;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct icmpv6_mib
{
@@ -159,7 +159,7 @@ struct icmpv6_mib
unsigned long Icmp6OutGroupMembResponses;
unsigned long Icmp6OutGroupMembReductions;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct tcp_mib
{
@@ -178,7 +178,7 @@ struct tcp_mib
unsigned long TcpInErrs;
unsigned long TcpOutRsts;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct udp_mib
{
@@ -187,7 +187,7 @@ struct udp_mib
unsigned long UdpInErrors;
unsigned long UdpOutDatagrams;
unsigned long __pad[0];
} ____cacheline_aligned;
};
/* draft-ietf-sigtran-sctp-mib-07.txt */
struct sctp_mib
@@ -216,7 +216,7 @@ struct sctp_mib
unsigned long SctpValCookieLife;
unsigned long SctpMaxInitRetr;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct linux_mib
{
@@ -286,7 +286,7 @@ struct linux_mib
unsigned long TCPAbortFailed;
unsigned long TCPMemoryPressures;
unsigned long __pad[0];
} ____cacheline_aligned;
};
/*
@@ -294,8 +294,25 @@ struct linux_mib
* addl $1,memory is atomic against interrupts (but atomic_inc would be overkill because of the lock
* cycles). Wants new nonlocked_atomic_inc() primitives -AK
*/
#define SNMP_INC_STATS(mib, field) ((mib)[2*smp_processor_id()+!in_softirq()].field++)
#define SNMP_INC_STATS_BH(mib, field) ((mib)[2*smp_processor_id()].field++)
#define SNMP_INC_STATS_USER(mib, field) ((mib)[2*smp_processor_id()+1].field++)
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) *name[2]
#define DECLARE_SNMP_STAT(type, name) \
extern __typeof__(type) *name[2]
#define SNMP_STAT_USRPTR(name) (name[0])
#define SNMP_STAT_BHPTR(name) (name[1])
#define SNMP_INC_STATS_BH(mib, field) \
(per_cpu_ptr(mib[0], smp_processor_id())->field++)
#define SNMP_INC_STATS_USER(mib, field) \
(per_cpu_ptr(mib[1], smp_processor_id())->field++)
#define SNMP_INC_STATS(mib, field) \
(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field++)
#define SNMP_DEC_STATS(mib, field) \
(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field--)
#define SNMP_ADD_STATS_BH(mib, field, addend) \
(per_cpu_ptr(mib[0], smp_processor_id())->field += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
(per_cpu_ptr(mib[1], smp_processor_id())->field += addend)
#endif
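
The macros above key the statistics copy on execution context: index 0 of the pointer pair is used from softirq (BH) context, index 1 from process context, and the generic SNMP_INC_STATS() picks between them with !in_softirq(). Together with the per-CPU split, this appears to be what lets a plain, non-atomic increment be used, as the comment above suggests. A small userspace sketch of the selection rule follows; fake_in_softirq, DEMO_INC_STATS() and demo_mib are illustrative stand-ins for in_softirq(), SNMP_INC_STATS() and the real MIB types.

/* Illustrative userspace sketch of the BH/user copy selection, not kernel code. */
#include <stdio.h>

struct demo_mib { unsigned long OutSegs; };

static struct demo_mib copies[2];	/* [0] = BH copy, [1] = user copy */
static int fake_in_softirq;		/* pretend context flag */

#define DEMO_INC_STATS(field)	(copies[!fake_in_softirq].field++)

int main(void)
{
	fake_in_softirq = 1;	/* "softirq" context: lands in copies[0] */
	DEMO_INC_STATS(OutSegs);

	fake_in_softirq = 0;	/* process context: lands in copies[1] */
	DEMO_INC_STATS(OutSegs);

	printf("BH copy %lu, user copy %lu, total %lu\n",
	       copies[0].OutSegs, copies[1].OutSegs,
	       copies[0].OutSegs + copies[1].OutSegs);	/* 1, 1, 2 */
	return 0;
}
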
@@ -28,6 +28,7 @@
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <net/checksum.h>
#include <net/sock.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -630,10 +631,11 @@ extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
extern struct proto tcp_prot;
extern struct tcp_mib tcp_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
extern void tcp_put_port(struct sock *sk);
extern void __tcp_put_port(struct sock *sk);
@@ -1399,7 +1401,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
TCP_DEC_STATS(TcpCurrEstab);
}
/* Change state AFTER socket is unhashed to avoid closed
......
@@ -25,6 +25,7 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
#define UDP_HTABLE_SIZE 128
@@ -71,7 +72,7 @@ extern int udp_rcv(struct sk_buff *skb);
extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int udp_disconnect(struct sock *sk, int flags);
extern struct udp_mib udp_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct udp_mib, udp_statistics);
#define UDP_INC_STATS(field) SNMP_INC_STATS(udp_statistics, field)
#define UDP_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_statistics, field)
#define UDP_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_statistics, field)
......
@@ -113,7 +113,7 @@
#include <linux/mroute.h>
#endif
struct linux_mib net_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
#ifdef INET_REFCNT_DEBUG
atomic_t inet_sock_nr;
@@ -1053,6 +1053,59 @@ static struct inet_protocol icmp_protocol = {
.handler = icmp_rcv,
};
static int __init init_ipv4_mibs(void)
{
int i;
net_statistics[0] =
kmalloc_percpu(sizeof (struct linux_mib), GFP_KERNEL);
net_statistics[1] =
kmalloc_percpu(sizeof (struct linux_mib), GFP_KERNEL);
ip_statistics[0] = kmalloc_percpu(sizeof (struct ip_mib), GFP_KERNEL);
ip_statistics[1] = kmalloc_percpu(sizeof (struct ip_mib), GFP_KERNEL);
icmp_statistics[0] =
kmalloc_percpu(sizeof (struct icmp_mib), GFP_KERNEL);
icmp_statistics[1] =
kmalloc_percpu(sizeof (struct icmp_mib), GFP_KERNEL);
tcp_statistics[0] = kmalloc_percpu(sizeof (struct tcp_mib), GFP_KERNEL);
tcp_statistics[1] = kmalloc_percpu(sizeof (struct tcp_mib), GFP_KERNEL);
udp_statistics[0] = kmalloc_percpu(sizeof (struct udp_mib), GFP_KERNEL);
udp_statistics[1] = kmalloc_percpu(sizeof (struct udp_mib), GFP_KERNEL);
if (!
(net_statistics[0] && net_statistics[1] && ip_statistics[0]
&& ip_statistics[1] && icmp_statistics[0] && icmp_statistics[1]
&& tcp_statistics[0] && tcp_statistics[1]
&& udp_statistics[0] && udp_statistics[1]))
return -ENOMEM;
/* Set all the per cpu copies of the mibs to zero */
for (i = 0; i < NR_CPUS; i++) {
if (cpu_possible(i)) {
memset(per_cpu_ptr(net_statistics[0], i), 0,
sizeof (struct linux_mib));
memset(per_cpu_ptr(net_statistics[1], i), 0,
sizeof (struct linux_mib));
memset(per_cpu_ptr(ip_statistics[0], i), 0,
sizeof (struct ip_mib));
memset(per_cpu_ptr(ip_statistics[1], i), 0,
sizeof (struct ip_mib));
memset(per_cpu_ptr(icmp_statistics[0], i), 0,
sizeof (struct icmp_mib));
memset(per_cpu_ptr(icmp_statistics[1], i), 0,
sizeof (struct icmp_mib));
memset(per_cpu_ptr(tcp_statistics[0], i), 0,
sizeof (struct tcp_mib));
memset(per_cpu_ptr(tcp_statistics[1], i), 0,
sizeof (struct tcp_mib));
memset(per_cpu_ptr(udp_statistics[0], i), 0,
sizeof (struct udp_mib));
memset(per_cpu_ptr(udp_statistics[1], i), 0,
sizeof (struct udp_mib));
}
}
return 0;
}
int ipv4_proc_init(void);
static int __init inet_init(void)
@@ -1148,6 +1201,12 @@ static int __init inet_init(void)
#if defined(CONFIG_IP_MROUTE)
ip_mr_init();
#endif
/*
* Initialise per-cpu ipv4 mibs
*/
if (init_ipv4_mibs())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n");
ipv4_proc_init();
return 0;
......
@@ -113,7 +113,7 @@ struct icmp_bxm {
/*
* Statistics
*/
struct icmp_mib icmp_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 states that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
@@ -213,10 +213,9 @@ int sysctl_icmp_ratemask = 0x1818;
* ICMP control array. This specifies what to do with each ICMP.
*/
struct icmp_control
{
unsigned long *output; /* Address to increment on output */
unsigned long *input; /* Address to increment on input */
struct icmp_control {
int output_off; /* Field offset for increment on output */
int input_off; /* Field offset for increment on input */
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
@@ -343,10 +342,7 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
static void icmp_out_count(int type)
{
if (type <= NR_ICMP_TYPES) {
(icmp_pointers[type].output)[(smp_processor_id() * 2 +
!in_softirq()) *
sizeof(struct icmp_mib) /
sizeof(unsigned long)]++;
ICMP_INC_STATS_FIELD(icmp_pointers[type].output_off);
ICMP_INC_STATS(IcmpOutMsgs);
}
}
@@ -1005,9 +1001,7 @@ int icmp_rcv(struct sk_buff *skb)
}
}
icmp_pointers[icmph->type].input[smp_processor_id() * 2 *
sizeof(struct icmp_mib) /
sizeof(unsigned long)]++;
ICMP_INC_STATS_BH_FIELD(icmp_pointers[icmph->type].input_off);
(icmp_pointers[icmph->type].handler)(skb);
drop:
@@ -1024,122 +1018,122 @@ int icmp_rcv(struct sk_buff *skb)
static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
/* ECHO REPLY (0) */
[0] = {
.output = &icmp_statistics[0].IcmpOutEchoReps,
.input = &icmp_statistics[0].IcmpInEchoReps,
.output_off = offsetof(struct icmp_mib, IcmpOutEchoReps),
.input_off = offsetof(struct icmp_mib, IcmpInEchoReps),
.handler = icmp_discard,
},
[1] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].IcmpInErrors,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[2] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].IcmpInErrors,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
/* DEST UNREACH (3) */
[3] = {
.output = &icmp_statistics[0].IcmpOutDestUnreachs,
.input = &icmp_statistics[0].IcmpInDestUnreachs,
.output_off = offsetof(struct icmp_mib, IcmpOutDestUnreachs),
.input_off = offsetof(struct icmp_mib, IcmpInDestUnreachs),
.handler = icmp_unreach,
.error = 1,
},
/* SOURCE QUENCH (4) */
[4] = {
.output = &icmp_statistics[0].IcmpOutSrcQuenchs,
.input = &icmp_statistics[0].IcmpInSrcQuenchs,
.output_off = offsetof(struct icmp_mib, IcmpOutSrcQuenchs),
.input_off = offsetof(struct icmp_mib, IcmpInSrcQuenchs),
.handler = icmp_unreach,
.error = 1,
},
/* REDIRECT (5) */
[5] = {
.output = &icmp_statistics[0].IcmpOutRedirects,
.input = &icmp_statistics[0].IcmpInRedirects,
.output_off = offsetof(struct icmp_mib, IcmpOutRedirects),
.input_off = offsetof(struct icmp_mib, IcmpInRedirects),
.handler = icmp_redirect,
.error = 1,
},
[6] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].IcmpInErrors,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[7] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].IcmpInErrors,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
/* ECHO (8) */
[8] = {
.output = &icmp_statistics[0].IcmpOutEchos,
.input = &icmp_statistics[0].IcmpInEchos,
.output_off = offsetof(struct icmp_mib, IcmpOutEchos),
.input_off = offsetof(struct icmp_mib, IcmpInEchos),
.handler = icmp_echo,
.error = 0,
},
[9] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].IcmpInErrors,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[10] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].IcmpInErrors,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
/* TIME EXCEEDED (11) */
[11] = {
.output = &icmp_statistics[0].IcmpOutTimeExcds,
.input = &icmp_statistics[0].IcmpInTimeExcds,
.output_off = offsetof(struct icmp_mib, IcmpOutTimeExcds),
.input_off = offsetof(struct icmp_mib, IcmpInTimeExcds),
.handler = icmp_unreach,
.error = 1,
},
/* PARAMETER PROBLEM (12) */
[12] = {
.output = &icmp_statistics[0].IcmpOutParmProbs,
.input = &icmp_statistics[0].IcmpInParmProbs,
.output_off = offsetof(struct icmp_mib, IcmpOutParmProbs),
.input_off = offsetof(struct icmp_mib, IcmpInParmProbs),
.handler = icmp_unreach,
.error = 1,
},
/* TIMESTAMP (13) */
[13] = {
.output = &icmp_statistics[0].IcmpOutTimestamps,
.input = &icmp_statistics[0].IcmpInTimestamps,
.output_off = offsetof(struct icmp_mib, IcmpOutTimestamps),
.input_off = offsetof(struct icmp_mib, IcmpInTimestamps),
.handler = icmp_timestamp,
},
/* TIMESTAMP REPLY (14) */
[14] = {
.output = &icmp_statistics[0].IcmpOutTimestampReps,
.input = &icmp_statistics[0].IcmpInTimestampReps,
.output_off = offsetof(struct icmp_mib, IcmpOutTimestampReps),
.input_off = offsetof(struct icmp_mib, IcmpInTimestampReps),
.handler = icmp_discard,
},
/* INFO (15) */
[15] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].dummy,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_discard,
},
/* INFO REPLY (16) */
[16] = {
.output = &icmp_statistics[0].dummy,
.input = &icmp_statistics[0].dummy,
.output_off = offsetof(struct icmp_mib, dummy),
.input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_discard,
},
/* ADDR MASK (17) */
[17] = {
.output = &icmp_statistics[0].IcmpOutAddrMasks,
.input = &icmp_statistics[0].IcmpInAddrMasks,
.output_off = offsetof(struct icmp_mib, IcmpOutAddrMasks),
.input_off = offsetof(struct icmp_mib, IcmpInAddrMasks),
.handler = icmp_address,
},
/* ADDR MASK REPLY (18) */
[18] = {
.output = &icmp_statistics[0].IcmpOutAddrMaskReps,
.input = &icmp_statistics[0].IcmpInAddrMaskReps,
.output_off = offsetof(struct icmp_mib, IcmpOutAddrMaskReps),
.input_off = offsetof(struct icmp_mib, IcmpInAddrMaskReps),
.handler = icmp_address_reply,
}
};
......
@@ -149,7 +149,7 @@
* SNMP management statistics
*/
struct ip_mib ip_statistics[NR_CPUS*2];
DEFINE_SNMP_STAT(struct ip_mib, ip_statistics);
/*
* Process Router Attention IP option
......
@@ -86,16 +86,21 @@ static struct file_operations sockstat_seq_fops = {
.release = single_release,
};
static unsigned long fold_field(unsigned long *begin, int sz, int nr)
static unsigned long
fold_field(void *mib[], int nr)
{
unsigned long res = 0;
int i;
sz /= sizeof(unsigned long);
for (i = 0; i < NR_CPUS; i++) {
res += begin[2 * i * sz + nr];
res += begin[(2 * i + 1) * sz + nr];
if (!cpu_possible(i))
continue;
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr));
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr));
}
return res;
}
@@ -118,8 +123,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct ip_mib, __pad) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)ip_statistics,
sizeof(struct ip_mib), i));
fold_field((void **) ip_statistics, i));
seq_printf(seq, "\nIcmp: InMsgs InErrors InDestUnreachs InTimeExcds "
"InParmProbs InSrcQuenchs InRedirects InEchos "
@@ -132,8 +136,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct icmp_mib, dummy) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)icmp_statistics,
sizeof(struct icmp_mib), i));
fold_field((void **) icmp_statistics, i));
seq_printf(seq, "\nTcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens "
"PassiveOpens AttemptFails EstabResets CurrEstab "
@@ -142,8 +145,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct tcp_mib, __pad) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)tcp_statistics,
sizeof(struct tcp_mib), i));
fold_field((void **) tcp_statistics, i));
seq_printf(seq, "\nUdp: InDatagrams NoPorts InErrors OutDatagrams\n"
"Udp:");
@@ -151,8 +153,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct udp_mib, __pad) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)udp_statistics,
sizeof(struct udp_mib), i));
fold_field((void **) udp_statistics, i));
seq_putc(seq, '\n');
return 0;
@@ -206,10 +207,10 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
" TCPAbortFailed TCPMemoryPressures\n"
"TcpExt:");
for (i = 0;
i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long); i++)
i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long);
i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)net_statistics,
sizeof(struct linux_mib), i));
fold_field((void **) net_statistics, i));
seq_putc(seq, '\n');
return 0;
}
......
@@ -258,13 +258,15 @@
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
struct tcp_mib tcp_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
kmem_cache_t *tcp_openreq_cachep;
kmem_cache_t *tcp_bucket_cachep;
@@ -1395,8 +1397,7 @@ static void tcp_prequeue_process(struct sock *sk)
struct sk_buff *skb;
struct tcp_opt *tp = tcp_sk(sk);
net_statistics[smp_processor_id() * 2 + 1].TCPPrequeued +=
skb_queue_len(&tp->ucopy.prequeue);
NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
/* RX process wants to run with disabled BHs, though it is not
* necessary */
@@ -1676,7 +1677,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
net_statistics[smp_processor_id() * 2 + 1].TCPDirectCopyFromBacklog += chunk;
NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
len -= chunk;
copied += chunk;
}
@@ -1687,7 +1688,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
net_statistics[smp_processor_id() * 2 + 1].TCPDirectCopyFromPrequeue += chunk;
NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
@@ -1770,7 +1771,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
net_statistics[smp_processor_id() * 2 + 1].TCPDirectCopyFromPrequeue += chunk;
NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
......
@@ -3043,8 +3043,8 @@ static int tcp_prune_queue(struct sock *sk)
/* First, purge the out_of_order queue. */
if (skb_queue_len(&tp->out_of_order_queue)) {
net_statistics[smp_processor_id() * 2].OfoPruned +=
skb_queue_len(&tp->out_of_order_queue);
NET_ADD_STATS_BH(OfoPruned,
skb_queue_len(&tp->out_of_order_queue));
__skb_queue_purge(&tp->out_of_order_queue);
/* Reset SACK state. A conforming SACK implementation will
......
@@ -464,7 +464,7 @@ static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
if ((tcp_tw_count -= killed) != 0)
mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
net_statistics[smp_processor_id()*2].TimeWaited += killed;
NET_ADD_STATS_BH(TimeWaited, killed);
out:
spin_unlock(&tw_death_lock);
}
@@ -628,7 +628,7 @@ void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
out:
if ((tcp_tw_count -= killed) == 0)
del_timer(&tcp_tw_timer);
net_statistics[smp_processor_id()*2].TimeWaitKilled += killed;
NET_ADD_STATS_BH(TimeWaitKilled, killed);
spin_unlock(&tw_death_lock);
}
......
@@ -237,7 +237,8 @@ static void tcp_delack_timer(unsigned long data)
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
net_statistics[smp_processor_id()*2].TCPSchedulerFailed += skb_queue_len(&tp->ucopy.prequeue);
NET_ADD_STATS_BH(TCPSchedulerFailed,
skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->backlog_rcv(sk, skb);
......
@@ -110,7 +110,7 @@
* Snmp MIB for the UDP layer
*/
struct udp_mib udp_statistics[NR_CPUS*2];
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
struct sock *udp_hash[UDP_HTABLE_SIZE];
rwlock_t udp_hash_lock = RW_LOCK_UNLOCKED;
......