Commit b4d59dc8 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 6c81f168 a39dd8bd
......@@ -2850,6 +2850,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
return -1;
}
#warning MAX_ADDR_LEN is now 32 bytes instead of 8, please fix this as appropriate
memcpy(dev->dev_addr, addr, MAX_ADDR_LEN);
#else
get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
......
......@@ -40,6 +40,7 @@
#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */
#define ARPHRD_IEEE1394 24 /* IEEE 1394 IPv4 - RFC 2734 */
#define ARPHRD_EUI64 27 /* EUI-64 */
#define ARPHRD_INFINIBAND 32 /* InfiniBand */
/* Dummy types for non ARP hardware */
#define ARPHRD_SLIP 256
......
......@@ -66,7 +66,7 @@ struct vlan_group;
#endif
#define MAX_ADDR_LEN 8 /* Largest hardware address length */
#define MAX_ADDR_LEN 32 /* Largest hardware address length */
/*
* Compute the worst case header length according to the protocols
......
......@@ -17,6 +17,7 @@
#define RTM_NEWLINK (RTM_BASE+0)
#define RTM_DELLINK (RTM_BASE+1)
#define RTM_GETLINK (RTM_BASE+2)
#define RTM_SETLINK (RTM_BASE+3)
#define RTM_NEWADDR (RTM_BASE+4)
#define RTM_DELADDR (RTM_BASE+5)
......
......@@ -23,6 +23,7 @@
#include <net/sock.h>
#include <net/protocol.h>
#include <net/snmp.h>
#include <linux/ip.h>
struct icmp_err {
......@@ -31,10 +32,22 @@ struct icmp_err {
};
extern struct icmp_err icmp_err_convert[];
extern struct icmp_mib icmp_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct icmp_mib, icmp_statistics);
#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field)
#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field)
#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field)
#define ICMP_INC_STATS_FIELD(offt) \
(*((unsigned long *) ((void *) \
per_cpu_ptr(icmp_statistics[!in_softirq()],\
smp_processor_id())) + offt))++;
#define ICMP_INC_STATS_BH_FIELD(offt) \
(*((unsigned long *) ((void *) \
per_cpu_ptr(icmp_statistics[0], \
smp_processor_id())) + offt))++;
#define ICMP_INC_STATS_USER_FIELD(offt) \
(*((unsigned long *) ((void *) \
per_cpu_ptr(icmp_statistics[1], \
smp_processor_id())) + offt))++;
extern void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info);
extern int icmp_rcv(struct sk_buff *skb);
......
......@@ -149,14 +149,16 @@ struct ipv4_config
};
extern struct ipv4_config ipv4_config;
extern struct ip_mib ip_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct ip_mib, ip_statistics);
#define IP_INC_STATS(field) SNMP_INC_STATS(ip_statistics, field)
#define IP_INC_STATS_BH(field) SNMP_INC_STATS_BH(ip_statistics, field)
#define IP_INC_STATS_USER(field) SNMP_INC_STATS_USER(ip_statistics, field)
extern struct linux_mib net_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct linux_mib, net_statistics);
#define NET_INC_STATS(field) SNMP_INC_STATS(net_statistics, field)
#define NET_INC_STATS_BH(field) SNMP_INC_STATS_BH(net_statistics, field)
#define NET_INC_STATS_USER(field) SNMP_INC_STATS_USER(net_statistics, field)
#define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd)
#define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd)
extern int sysctl_local_port_range[2];
extern int sysctl_ip_default_ttl;
......
......@@ -19,6 +19,7 @@
#include <asm/hardirq.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/snmp.h>
#define SIN6_LEN_RFC2133 24
......@@ -105,15 +106,19 @@ struct frag_hdr {
/* sysctls */
extern int sysctl_ipv6_bindv6only;
extern struct ipv6_mib ipv6_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct ipv6_mib, ipv6_statistics);
#define IP6_INC_STATS(field) SNMP_INC_STATS(ipv6_statistics, field)
#define IP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(ipv6_statistics, field)
#define IP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(ipv6_statistics, field)
extern struct icmpv6_mib icmpv6_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
#define ICMP6_INC_STATS(field) SNMP_INC_STATS(icmpv6_statistics, field)
#define ICMP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmpv6_statistics, field)
#define ICMP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmpv6_statistics, field)
extern struct udp_mib udp_stats_in6[NR_CPUS*2];
#define ICMP6_STATS_PTR_BH(field) \
(& \
((per_cpu_ptr(icmpv6_statistics[0], smp_processor_id()))-> \
field))
DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6);
#define UDP6_INC_STATS(field) SNMP_INC_STATS(udp_stats_in6, field)
#define UDP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_stats_in6, field)
#define UDP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_stats_in6, field)
......
......@@ -102,7 +102,11 @@ struct rt_cache_stat
unsigned int gc_ignored;
unsigned int gc_goal_miss;
unsigned int gc_dst_overflow;
} ____cacheline_aligned_in_smp;
};
extern struct rt_cache_stat *rt_cache_stat;
#define RT_CACHE_STAT_INC(field) \
(per_cpu_ptr(rt_cache_stat, smp_processor_id())->field++)
extern struct ip_rt_acct *ip_rt_acct;
......
......@@ -203,7 +203,7 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
#define SCTP_SOCK_SLEEP_POST(sk) SOCK_SLEEP_POST(sk)
/* SCTP SNMP MIB stats handlers */
extern struct sctp_mib sctp_statistics[NR_CPUS * 2];
DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field)
#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field)
#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
......
......@@ -62,7 +62,7 @@ struct ip_mib
unsigned long IpFragFails;
unsigned long IpFragCreates;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct ipv6_mib
{
......@@ -89,7 +89,7 @@ struct ipv6_mib
unsigned long Ip6InMcastPkts;
unsigned long Ip6OutMcastPkts;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct icmp_mib
{
......@@ -121,7 +121,7 @@ struct icmp_mib
unsigned long IcmpOutAddrMaskReps;
unsigned long dummy;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct icmpv6_mib
{
......@@ -159,7 +159,7 @@ struct icmpv6_mib
unsigned long Icmp6OutGroupMembResponses;
unsigned long Icmp6OutGroupMembReductions;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct tcp_mib
{
......@@ -178,7 +178,7 @@ struct tcp_mib
unsigned long TcpInErrs;
unsigned long TcpOutRsts;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct udp_mib
{
......@@ -187,7 +187,7 @@ struct udp_mib
unsigned long UdpInErrors;
unsigned long UdpOutDatagrams;
unsigned long __pad[0];
} ____cacheline_aligned;
};
/* draft-ietf-sigtran-sctp-mib-07.txt */
struct sctp_mib
......@@ -216,7 +216,7 @@ struct sctp_mib
unsigned long SctpValCookieLife;
unsigned long SctpMaxInitRetr;
unsigned long __pad[0];
} ____cacheline_aligned;
};
struct linux_mib
{
......@@ -286,7 +286,7 @@ struct linux_mib
unsigned long TCPAbortFailed;
unsigned long TCPMemoryPressures;
unsigned long __pad[0];
} ____cacheline_aligned;
};
/*
......@@ -294,8 +294,25 @@ struct linux_mib
* addl $1,memory is atomic against interrupts (but atomic_inc would be overkill because of the lock
* cycles). Wants new nonlocked_atomic_inc() primitives -AK
*/
/*
 * SNMP per-cpu statistics.  Each MIB is a two-element array of per-cpu
 * pointers: index 0 is the copy updated from BH (softirq) context,
 * index 1 the copy updated from user (process) context.  The increments
 * need no locking because addl $1,memory is atomic against interrupts
 * (but atomic_inc would be overkill because of the lock cycles). -AK
 */
#define DEFINE_SNMP_STAT(type, name) \
	__typeof__(type) *name[2]
#define DECLARE_SNMP_STAT(type, name) \
	extern __typeof__(type) *name[2]
/* BH copy is index 0, user copy is index 1 - must match the macros below. */
#define SNMP_STAT_BHPTR(name)	(name[0])
#define SNMP_STAT_USRPTR(name)	(name[1])
#define SNMP_INC_STATS_BH(mib, field) \
	(per_cpu_ptr(mib[0], smp_processor_id())->field++)
#define SNMP_INC_STATS_USER(mib, field) \
	(per_cpu_ptr(mib[1], smp_processor_id())->field++)
/* Context-agnostic variants: pick the copy from the current context. */
#define SNMP_INC_STATS(mib, field) \
	(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field++)
#define SNMP_DEC_STATS(mib, field) \
	(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field--)
#define SNMP_ADD_STATS_BH(mib, field, addend) \
	(per_cpu_ptr(mib[0], smp_processor_id())->field += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
	(per_cpu_ptr(mib[1], smp_processor_id())->field += addend)
#endif
......@@ -28,6 +28,7 @@
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <net/checksum.h>
#include <net/sock.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
......@@ -630,10 +631,11 @@ extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
extern struct proto tcp_prot;
extern struct tcp_mib tcp_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
extern void tcp_put_port(struct sock *sk);
extern void __tcp_put_port(struct sock *sk);
......@@ -1399,7 +1401,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
TCP_DEC_STATS(TcpCurrEstab);
}
/* Change state AFTER socket is unhashed to avoid closed
......
......@@ -25,6 +25,7 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
#define UDP_HTABLE_SIZE 128
......@@ -71,7 +72,7 @@ extern int udp_rcv(struct sk_buff *skb);
extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int udp_disconnect(struct sock *sk, int flags);
extern struct udp_mib udp_statistics[NR_CPUS*2];
DECLARE_SNMP_STAT(struct udp_mib, udp_statistics);
#define UDP_INC_STATS(field) SNMP_INC_STATS(udp_statistics, field)
#define UDP_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_statistics, field)
#define UDP_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_statistics, field)
......
......@@ -2133,7 +2133,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
case SIOCGIFHWADDR:
memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
MAX_ADDR_LEN);
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
ifr->ifr_hwaddr.sa_family = dev->type;
return 0;
......@@ -2154,7 +2154,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
MAX_ADDR_LEN);
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
notifier_call_chain(&netdev_chain,
NETDEV_CHANGEADDR, dev);
return 0;
......
......@@ -220,6 +220,40 @@ int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct ifinfomsg *ifm = NLMSG_DATA(nlh);
struct rtattr **ida = arg;
struct net_device *dev;
int err;
dev = dev_get_by_index(ifm->ifi_index);
if (!dev)
return -ENODEV;
err = -EINVAL;
if (ida[IFLA_ADDRESS - 1]) {
if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
goto out;
memcpy(dev->dev_addr, RTA_DATA(ida[IFLA_ADDRESS - 1]),
dev->addr_len);
}
if (ida[IFLA_BROADCAST - 1]) {
if (ida[IFLA_BROADCAST - 1]->rta_len != RTA_LENGTH(dev->addr_len))
goto out;
memcpy(dev->broadcast, RTA_DATA(ida[IFLA_BROADCAST - 1]),
dev->addr_len);
}
err = 0;
out:
dev_put(dev);
return err;
}
int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx;
......@@ -457,33 +491,15 @@ static void rtnetlink_rcv(struct sock *sk, int len)
/*
 * Dispatch table for rtnetlink messages, indexed by message type
 * relative to RTM_BASE.  Entries not listed are implicitly zeroed
 * (no .doit / .dumpit handler), so unknown types are rejected.
 */
static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
{
	[RTM_GETLINK  - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
	[RTM_SETLINK  - RTM_BASE] = { .doit   = do_setlink },
	[RTM_GETADDR  - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
	[RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
	[RTM_NEWNEIGH - RTM_BASE] = { .doit   = neigh_add },
	[RTM_DELNEIGH - RTM_BASE] = { .doit   = neigh_delete },
	[RTM_GETNEIGH - RTM_BASE] = { .dumpit = neigh_dump_info },
};
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
......
......@@ -324,7 +324,9 @@ void __kfree_skb(struct sk_buff *skb)
}
dst_release(skb->dst);
#ifdef CONFIG_INET
secpath_put(skb->sp);
#endif
if(skb->destructor) {
if (in_irq())
printk(KERN_WARNING "Warning: kfree_skb on "
......@@ -378,7 +380,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
C(dst);
dst_clone(n->dst);
C(sp);
#ifdef CONFIG_INET
secpath_get(n->sp);
#endif
memcpy(n->cb, skb->cb, sizeof(skb->cb));
C(len);
C(data_len);
......@@ -438,7 +442,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->priority = old->priority;
new->protocol = old->protocol;
new->dst = dst_clone(old->dst);
#ifdef CONFIG_INET
new->sp = secpath_get(old->sp);
#endif
new->h.raw = old->h.raw + offset;
new->nh.raw = old->nh.raw + offset;
new->mac.raw = old->mac.raw + offset;
......
......@@ -113,7 +113,7 @@
#include <linux/mroute.h>
#endif
struct linux_mib net_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
#ifdef INET_REFCNT_DEBUG
atomic_t inet_sock_nr;
......@@ -1053,6 +1053,59 @@ static struct inet_protocol icmp_protocol = {
.handler = icmp_rcv,
};
/*
 * Allocate and zero the per-cpu IPv4 SNMP MIBs.  Each MIB is kept as
 * two per-cpu copies: index 0 updated from BH (softirq) context,
 * index 1 from user (process) context.
 *
 * Returns 0 on success, -ENOMEM if any per-cpu allocation failed.
 */
static int __init init_ipv4_mibs(void)
{
	int i;

	net_statistics[0] =
	    kmalloc_percpu(sizeof (struct linux_mib), GFP_KERNEL);
	net_statistics[1] =
	    kmalloc_percpu(sizeof (struct linux_mib), GFP_KERNEL);
	ip_statistics[0] = kmalloc_percpu(sizeof (struct ip_mib), GFP_KERNEL);
	ip_statistics[1] = kmalloc_percpu(sizeof (struct ip_mib), GFP_KERNEL);
	icmp_statistics[0] =
	    kmalloc_percpu(sizeof (struct icmp_mib), GFP_KERNEL);
	icmp_statistics[1] =
	    kmalloc_percpu(sizeof (struct icmp_mib), GFP_KERNEL);
	tcp_statistics[0] = kmalloc_percpu(sizeof (struct tcp_mib), GFP_KERNEL);
	tcp_statistics[1] = kmalloc_percpu(sizeof (struct tcp_mib), GFP_KERNEL);
	udp_statistics[0] = kmalloc_percpu(sizeof (struct udp_mib), GFP_KERNEL);
	udp_statistics[1] = kmalloc_percpu(sizeof (struct udp_mib), GFP_KERNEL);

	/*
	 * Every allocation - including icmp_statistics - must have
	 * succeeded before the memset loop below dereferences them.
	 */
	if (!(net_statistics[0] && net_statistics[1]
	      && ip_statistics[0] && ip_statistics[1]
	      && icmp_statistics[0] && icmp_statistics[1]
	      && tcp_statistics[0] && tcp_statistics[1]
	      && udp_statistics[0] && udp_statistics[1]))
		return -ENOMEM;

	/* Set all the per cpu copies of the mibs to zero */
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_possible(i)) {
			memset(per_cpu_ptr(net_statistics[0], i), 0,
			       sizeof (struct linux_mib));
			memset(per_cpu_ptr(net_statistics[1], i), 0,
			       sizeof (struct linux_mib));
			memset(per_cpu_ptr(ip_statistics[0], i), 0,
			       sizeof (struct ip_mib));
			memset(per_cpu_ptr(ip_statistics[1], i), 0,
			       sizeof (struct ip_mib));
			memset(per_cpu_ptr(icmp_statistics[0], i), 0,
			       sizeof (struct icmp_mib));
			memset(per_cpu_ptr(icmp_statistics[1], i), 0,
			       sizeof (struct icmp_mib));
			memset(per_cpu_ptr(tcp_statistics[0], i), 0,
			       sizeof (struct tcp_mib));
			memset(per_cpu_ptr(tcp_statistics[1], i), 0,
			       sizeof (struct tcp_mib));
			memset(per_cpu_ptr(udp_statistics[0], i), 0,
			       sizeof (struct udp_mib));
			memset(per_cpu_ptr(udp_statistics[1], i), 0,
			       sizeof (struct udp_mib));
		}
	}
	return 0;
}
int ipv4_proc_init(void);
static int __init inet_init(void)
......@@ -1148,6 +1201,12 @@ static int __init inet_init(void)
#if defined(CONFIG_IP_MROUTE)
ip_mr_init();
#endif
/*
* Initialise per-cpu ipv4 mibs
*/
if(init_ipv4_mibs())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ;
ipv4_proc_init();
return 0;
......
......@@ -113,7 +113,7 @@ struct icmp_bxm {
/*
* Statistics
*/
struct icmp_mib icmp_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOS_UNREACH and SR_FAIELD MUST be considered 'transient errs'. */
......@@ -213,10 +213,9 @@ int sysctl_icmp_ratemask = 0x1818;
* ICMP control array. This specifies what to do with each ICMP.
*/
struct icmp_control
{
unsigned long *output; /* Address to increment on output */
unsigned long *input; /* Address to increment on input */
struct icmp_control {
int output_off; /* Field offset for increment on output */
int input_off; /* Field offset for increment on input */
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
......@@ -343,10 +342,7 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
static void icmp_out_count(int type)
{
if (type <= NR_ICMP_TYPES) {
(icmp_pointers[type].output)[(smp_processor_id() * 2 +
!in_softirq()) *
sizeof(struct icmp_mib) /
sizeof(unsigned long)]++;
ICMP_INC_STATS_FIELD(icmp_pointers[type].output_off);
ICMP_INC_STATS(IcmpOutMsgs);
}
}
......@@ -1005,9 +1001,7 @@ int icmp_rcv(struct sk_buff *skb)
}
}
icmp_pointers[icmph->type].input[smp_processor_id() * 2 *
sizeof(struct icmp_mib) /
sizeof(unsigned long)]++;
ICMP_INC_STATS_BH_FIELD(icmp_pointers[icmph->type].input_off);
(icmp_pointers[icmph->type].handler)(skb);
drop:
......@@ -1024,122 +1018,122 @@ int icmp_rcv(struct sk_buff *skb)
/*
 * Per-ICMP-type control table.  The MIB counters to bump on input and
 * output are stored as byte offsets into struct icmp_mib so the proper
 * per-cpu copy can be selected at run time (see ICMP_INC_STATS_FIELD
 * and ICMP_INC_STATS_BH_FIELD).  Unassigned types count IcmpInErrors
 * on input, bump the 'dummy' slot on output, and are discarded.
 */
static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	/* ECHO REPLY (0) */
	[0] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutEchoReps),
		.input_off = offsetof(struct icmp_mib, IcmpInEchoReps),
		.handler = icmp_discard,
	},
	[1] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, IcmpInErrors),
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, IcmpInErrors),
		.handler = icmp_discard,
		.error = 1,
	},
	/* DEST UNREACH (3) */
	[3] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutDestUnreachs),
		.input_off = offsetof(struct icmp_mib, IcmpInDestUnreachs),
		.handler = icmp_unreach,
		.error = 1,
	},
	/* SOURCE QUENCH (4) */
	[4] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutSrcQuenchs),
		.input_off = offsetof(struct icmp_mib, IcmpInSrcQuenchs),
		.handler = icmp_unreach,
		.error = 1,
	},
	/* REDIRECT (5) */
	[5] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutRedirects),
		.input_off = offsetof(struct icmp_mib, IcmpInRedirects),
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, IcmpInErrors),
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, IcmpInErrors),
		.handler = icmp_discard,
		.error = 1,
	},
	/* ECHO (8) */
	[8] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutEchos),
		.input_off = offsetof(struct icmp_mib, IcmpInEchos),
		.handler = icmp_echo,
		.error = 0,
	},
	[9] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, IcmpInErrors),
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, IcmpInErrors),
		.handler = icmp_discard,
		.error = 1,
	},
	/* TIME EXCEEDED (11) */
	[11] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutTimeExcds),
		.input_off = offsetof(struct icmp_mib, IcmpInTimeExcds),
		.handler = icmp_unreach,
		.error = 1,
	},
	/* PARAMETER PROBLEM (12) */
	[12] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutParmProbs),
		.input_off = offsetof(struct icmp_mib, IcmpInParmProbs),
		.handler = icmp_unreach,
		.error = 1,
	},
	/* TIMESTAMP (13) */
	[13] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutTimestamps),
		.input_off = offsetof(struct icmp_mib, IcmpInTimestamps),
		.handler = icmp_timestamp,
	},
	/* TIMESTAMP REPLY (14) */
	[14] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutTimestampReps),
		.input_off = offsetof(struct icmp_mib, IcmpInTimestampReps),
		.handler = icmp_discard,
	},
	/* INFO (15) */
	[15] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, dummy),
		.handler = icmp_discard,
	},
	/* INFO REPLY (16) */
	[16] = {
		.output_off = offsetof(struct icmp_mib, dummy),
		.input_off = offsetof(struct icmp_mib, dummy),
		.handler = icmp_discard,
	},
	/* ADDR MASK (17) */
	[17] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutAddrMasks),
		.input_off = offsetof(struct icmp_mib, IcmpInAddrMasks),
		.handler = icmp_address,
	},
	/* ADDR MASK REPLY (18) */
	[18] = {
		.output_off = offsetof(struct icmp_mib, IcmpOutAddrMaskReps),
		.input_off = offsetof(struct icmp_mib, IcmpInAddrMaskReps),
		.handler = icmp_address_reply,
	}
};
......
......@@ -149,7 +149,7 @@
* SNMP management statistics
*/
struct ip_mib ip_statistics[NR_CPUS*2];
DEFINE_SNMP_STAT(struct ip_mib, ip_statistics);
/*
* Process Router Attention IP option
......
......@@ -86,16 +86,21 @@ static struct file_operations sockstat_seq_fops = {
.release = single_release,
};
static unsigned long fold_field(unsigned long *begin, int sz, int nr)
static unsigned long
fold_field(void *mib[], int nr)
{
unsigned long res = 0;
int i;
sz /= sizeof(unsigned long);
for (i = 0; i < NR_CPUS; i++) {
res += begin[2 * i * sz + nr];
res += begin[(2 * i + 1) * sz + nr];
if (!cpu_possible(i))
continue;
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr));
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr));
}
return res;
}
......@@ -118,8 +123,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct ip_mib, __pad) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)ip_statistics,
sizeof(struct ip_mib), i));
fold_field((void **) ip_statistics, i));
seq_printf(seq, "\nIcmp: InMsgs InErrors InDestUnreachs InTimeExcds "
"InParmProbs InSrcQuenchs InRedirects InEchos "
......@@ -132,8 +136,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct icmp_mib, dummy) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)icmp_statistics,
sizeof(struct icmp_mib), i));
fold_field((void **) icmp_statistics, i));
seq_printf(seq, "\nTcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens "
"PassiveOpens AttemptFails EstabResets CurrEstab "
......@@ -142,8 +145,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct tcp_mib, __pad) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)tcp_statistics,
sizeof(struct tcp_mib), i));
fold_field((void **) tcp_statistics, i));
seq_printf(seq, "\nUdp: InDatagrams NoPorts InErrors OutDatagrams\n"
"Udp:");
......@@ -151,8 +153,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0;
i < offsetof(struct udp_mib, __pad) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)udp_statistics,
sizeof(struct udp_mib), i));
fold_field((void **) udp_statistics, i));
seq_putc(seq, '\n');
return 0;
......@@ -206,10 +207,10 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
" TCPAbortFailed TCPMemoryPressures\n"
"TcpExt:");
for (i = 0;
i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long); i++)
i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long);
i++)
seq_printf(seq, " %lu",
fold_field((unsigned long *)net_statistics,
sizeof(struct linux_mib), i));
fold_field((void **) net_statistics, i));
seq_putc(seq, '\n');
return 0;
}
......
......@@ -196,7 +196,7 @@ static struct rt_hash_bucket *rt_hash_table;
static unsigned rt_hash_mask;
static int rt_hash_log;
struct rt_cache_stat rt_cache_stat[NR_CPUS];
struct rt_cache_stat *rt_cache_stat;
static int rt_intern_hash(unsigned hash, struct rtable *rth,
struct rtable **res);
......@@ -318,24 +318,26 @@ static int rt_cache_stat_get_info(char *buffer, char **start, off_t offset, int
int len = 0;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
len += sprintf(buffer+len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
dst_entries,
rt_cache_stat[i].in_hit,
rt_cache_stat[i].in_slow_tot,
rt_cache_stat[i].in_slow_mc,
rt_cache_stat[i].in_no_route,
rt_cache_stat[i].in_brd,
rt_cache_stat[i].in_martian_dst,
rt_cache_stat[i].in_martian_src,
rt_cache_stat[i].out_hit,
rt_cache_stat[i].out_slow_tot,
rt_cache_stat[i].out_slow_mc,
rt_cache_stat[i].gc_total,
rt_cache_stat[i].gc_ignored,
rt_cache_stat[i].gc_goal_miss,
rt_cache_stat[i].gc_dst_overflow
per_cpu_ptr(rt_cache_stat, i)->in_hit,
per_cpu_ptr(rt_cache_stat, i)->in_slow_tot,
per_cpu_ptr(rt_cache_stat, i)->in_slow_mc,
per_cpu_ptr(rt_cache_stat, i)->in_no_route,
per_cpu_ptr(rt_cache_stat, i)->in_brd,
per_cpu_ptr(rt_cache_stat, i)->in_martian_dst,
per_cpu_ptr(rt_cache_stat, i)->in_martian_src,
per_cpu_ptr(rt_cache_stat, i)->out_hit,
per_cpu_ptr(rt_cache_stat, i)->out_slow_tot,
per_cpu_ptr(rt_cache_stat, i)->out_slow_mc,
per_cpu_ptr(rt_cache_stat, i)->gc_total,
per_cpu_ptr(rt_cache_stat, i)->gc_ignored,
per_cpu_ptr(rt_cache_stat, i)->gc_goal_miss,
per_cpu_ptr(rt_cache_stat, i)->gc_dst_overflow
);
}
......@@ -591,11 +593,11 @@ static int rt_garbage_collect(void)
* do not make it too frequently.
*/
rt_cache_stat[smp_processor_id()].gc_total++;
RT_CACHE_STAT_INC(gc_total);
if (now - last_gc < ip_rt_gc_min_interval &&
atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
rt_cache_stat[smp_processor_id()].gc_ignored++;
RT_CACHE_STAT_INC(gc_ignored);
goto out;
}
......@@ -663,7 +665,7 @@ static int rt_garbage_collect(void)
We will not spin here for long time in any case.
*/
rt_cache_stat[smp_processor_id()].gc_goal_miss++;
RT_CACHE_STAT_INC(gc_goal_miss);
if (expire == 0)
break;
......@@ -682,7 +684,7 @@ static int rt_garbage_collect(void)
goto out;
if (net_ratelimit())
printk(KERN_WARNING "dst cache overflow\n");
rt_cache_stat[smp_processor_id()].gc_dst_overflow++;
RT_CACHE_STAT_INC(gc_dst_overflow);
return 1;
work_done:
......@@ -1400,7 +1402,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
rth->u.dst.input = ip_mr_input;
#endif
rt_cache_stat[smp_processor_id()].in_slow_mc++;
RT_CACHE_STAT_INC(in_slow_mc);
in_dev_put(in_dev);
hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos);
......@@ -1485,7 +1487,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
}
free_res = 1;
rt_cache_stat[smp_processor_id()].in_slow_tot++;
RT_CACHE_STAT_INC(in_slow_tot);
#ifdef CONFIG_IP_ROUTE_NAT
/* Policy is applied before mapping destination,
......@@ -1642,7 +1644,7 @@ out: return err;
}
flags |= RTCF_BROADCAST;
res.type = RTN_BROADCAST;
rt_cache_stat[smp_processor_id()].in_brd++;
RT_CACHE_STAT_INC(in_brd);
local_input:
rth = dst_alloc(&ipv4_dst_ops);
......@@ -1687,7 +1689,7 @@ out: return err;
goto intern;
no_route:
rt_cache_stat[smp_processor_id()].in_no_route++;
RT_CACHE_STAT_INC(in_no_route);
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
res.type = RTN_UNREACHABLE;
goto local_input;
......@@ -1696,7 +1698,7 @@ out: return err;
* Do not cache martian addresses: they should be logged (RFC1812)
*/
martian_destination:
rt_cache_stat[smp_processor_id()].in_martian_dst++;
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
......@@ -1713,7 +1715,7 @@ out: return err;
martian_source:
rt_cache_stat[smp_processor_id()].in_martian_src++;
RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
/*
......@@ -1763,7 +1765,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
rt_cache_stat[smp_processor_id()].in_hit++;
RT_CACHE_STAT_INC(in_hit);
rcu_read_unlock();
skb->dst = (struct dst_entry*)rth;
return 0;
......@@ -2060,7 +2062,7 @@ int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
rth->u.dst.output=ip_output;
rt_cache_stat[smp_processor_id()].out_slow_tot++;
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & RTCF_LOCAL) {
rth->u.dst.input = ip_local_deliver;
......@@ -2070,7 +2072,7 @@ int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
rth->rt_spec_dst = fl.fl4_src;
if (flags & RTCF_LOCAL && !(dev_out->flags & IFF_LOOPBACK)) {
rth->u.dst.output = ip_mc_output;
rt_cache_stat[smp_processor_id()].out_slow_mc++;
RT_CACHE_STAT_INC(out_slow_mc);
}
#ifdef CONFIG_IP_MROUTE
if (res.type == RTN_MULTICAST) {
......@@ -2129,7 +2131,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
rt_cache_stat[smp_processor_id()].out_hit++;
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock();
*rp = rth;
return 0;
......@@ -2650,6 +2652,11 @@ int __init ip_rt_init(void)
ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
ip_rt_max_size = (rt_hash_mask + 1) * 16;
rt_cache_stat = kmalloc_percpu(sizeof (struct rt_cache_stat),
GFP_KERNEL);
if (!rt_cache_stat)
goto out_enomem1;
devinet_init();
ip_fib_init();
......@@ -2675,6 +2682,8 @@ int __init ip_rt_init(void)
out:
return rc;
out_enomem:
kfree_percpu(rt_cache_stat);
out_enomem1:
rc = -ENOMEM;
goto out;
}
......@@ -258,13 +258,15 @@
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
struct tcp_mib tcp_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
kmem_cache_t *tcp_openreq_cachep;
kmem_cache_t *tcp_bucket_cachep;
......@@ -1395,8 +1397,7 @@ static void tcp_prequeue_process(struct sock *sk)
struct sk_buff *skb;
struct tcp_opt *tp = tcp_sk(sk);
net_statistics[smp_processor_id() * 2 + 1].TCPPrequeued +=
skb_queue_len(&tp->ucopy.prequeue);
NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
/* RX process wants to run with disabled BHs, though it is not
* necessary */
......@@ -1676,7 +1677,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
net_statistics[smp_processor_id() * 2 + 1].TCPDirectCopyFromBacklog += chunk;
NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
len -= chunk;
copied += chunk;
}
......@@ -1687,7 +1688,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
net_statistics[smp_processor_id() * 2 + 1].TCPDirectCopyFromPrequeue += chunk;
NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
......@@ -1770,7 +1771,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
net_statistics[smp_processor_id() * 2 + 1].TCPDirectCopyFromPrequeue += chunk;
NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
......
......@@ -3043,8 +3043,8 @@ static int tcp_prune_queue(struct sock *sk)
/* First, purge the out_of_order queue. */
if (skb_queue_len(&tp->out_of_order_queue)) {
net_statistics[smp_processor_id() * 2].OfoPruned +=
skb_queue_len(&tp->out_of_order_queue);
NET_ADD_STATS_BH(OfoPruned,
skb_queue_len(&tp->out_of_order_queue));
__skb_queue_purge(&tp->out_of_order_queue);
/* Reset SACK state. A conforming SACK implementation will
......
......@@ -464,7 +464,7 @@ static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
if ((tcp_tw_count -= killed) != 0)
mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
net_statistics[smp_processor_id()*2].TimeWaited += killed;
NET_ADD_STATS_BH(TimeWaited, killed);
out:
spin_unlock(&tw_death_lock);
}
......@@ -628,7 +628,7 @@ void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
out:
if ((tcp_tw_count -= killed) == 0)
del_timer(&tcp_tw_timer);
net_statistics[smp_processor_id()*2].TimeWaitKilled += killed;
NET_ADD_STATS_BH(TimeWaitKilled, killed);
spin_unlock(&tw_death_lock);
}
......
......@@ -237,7 +237,8 @@ static void tcp_delack_timer(unsigned long data)
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
net_statistics[smp_processor_id()*2].TCPSchedulerFailed += skb_queue_len(&tp->ucopy.prequeue);
NET_ADD_STATS_BH(TCPSchedulerFailed,
skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->backlog_rcv(sk, skb);
......
......@@ -110,7 +110,7 @@
* Snmp MIB for the UDP layer
*/
struct udp_mib udp_statistics[NR_CPUS*2];
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
struct sock *udp_hash[UDP_HTABLE_SIZE];
rwlock_t udp_hash_lock = RW_LOCK_UNLOCKED;
......
......@@ -619,6 +619,81 @@ inet6_unregister_protosw(struct inet_protosw *p)
inet_unregister_protosw(p);
}
static int __init init_ipv6_mibs(void)
{
int i;
ipv6_statistics[0] = kmalloc_percpu(sizeof (struct ipv6_mib),
GFP_KERNEL);
if (!ipv6_statistics[0])
goto err_ip_mib0;
ipv6_statistics[1] = kmalloc_percpu(sizeof (struct ipv6_mib),
GFP_KERNEL);
if (!ipv6_statistics[1])
goto err_ip_mib1;
icmpv6_statistics[0] = kmalloc_percpu(sizeof (struct icmpv6_mib),
GFP_KERNEL);
if (!icmpv6_statistics[0])
goto err_icmp_mib0;
icmpv6_statistics[1] = kmalloc_percpu(sizeof (struct icmpv6_mib),
GFP_KERNEL);
if (!icmpv6_statistics[1])
goto err_icmp_mib1;
udp_stats_in6[0] = kmalloc_percpu(sizeof (struct udp_mib),
GFP_KERNEL);
if (!udp_stats_in6[0])
goto err_udp_mib0;
udp_stats_in6[1] = kmalloc_percpu(sizeof (struct udp_mib),
GFP_KERNEL);
if (!udp_stats_in6[1])
goto err_udp_mib1;
/* Zero all percpu versions of the mibs */
for (i = 0; i < NR_CPUS; i++) {
if (cpu_possible(i)) {
memset(per_cpu_ptr(ipv6_statistics[0], i), 0,
sizeof (struct ipv6_mib));
memset(per_cpu_ptr(ipv6_statistics[1], i), 0,
sizeof (struct ipv6_mib));
memset(per_cpu_ptr(icmpv6_statistics[0], i), 0,
sizeof (struct icmpv6_mib));
memset(per_cpu_ptr(icmpv6_statistics[1], i), 0,
sizeof (struct icmpv6_mib));
memset(per_cpu_ptr(udp_stats_in6[0], i), 0,
sizeof (struct udp_mib));
memset(per_cpu_ptr(udp_stats_in6[1], i), 0,
sizeof (struct udp_mib));
}
}
return 0;
err_udp_mib1:
kfree_percpu(udp_stats_in6[0]);
err_udp_mib0:
kfree_percpu(icmpv6_statistics[1]);
err_icmp_mib1:
kfree_percpu(icmpv6_statistics[0]);
err_icmp_mib0:
kfree_percpu(ipv6_statistics[1]);
err_ip_mib1:
kfree_percpu(ipv6_statistics[0]);
err_ip_mib0:
return -ENOMEM;
}
static void __exit cleanup_ipv6_mibs(void)
{
kfree_percpu(ipv6_statistics[0]);
kfree_percpu(ipv6_statistics[1]);
kfree_percpu(icmpv6_statistics[0]);
kfree_percpu(icmpv6_statistics[1]);
kfree_percpu(udp_stats_in6[0]);
kfree_percpu(udp_stats_in6[1]);
}
static int __init inet6_init(void)
{
struct sk_buff *dummy_skb;
......@@ -669,6 +744,11 @@ static int __init inet6_init(void)
*/
(void) sock_register(&inet6_family_ops);
/* Initialise ipv6 mibs */
err = init_ipv6_mibs();
if (err)
goto init_mib_fail;
/*
* ipngwg API draft makes clear that the correct semantics
* for TCP and UDP is to consider one TCP and UDP instance
......@@ -735,6 +815,8 @@ static int __init inet6_init(void)
#if defined(MODULE) && defined(CONFIG_SYSCTL)
ipv6_sysctl_unregister();
#endif
cleanup_ipv6_mibs();
init_mib_fail:
return err;
}
module_init(inet6_init);
......@@ -765,6 +847,7 @@ static void inet6_exit(void)
#ifdef CONFIG_SYSCTL
ipv6_sysctl_unregister();
#endif
cleanup_ipv6_mibs();
}
module_exit(inet6_exit);
#endif /* MODULE */
......
......@@ -65,7 +65,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
struct icmpv6_mib icmpv6_statistics[NR_CPUS*2];
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
/*
* ICMP socket for flow control.
......@@ -377,7 +377,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
ip6_build_xmit(sk, icmpv6_getfrag, &msg, &fl, len, NULL, -1,
MSG_DONTWAIT);
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
(&(icmpv6_statistics[smp_processor_id()*2].Icmp6OutDestUnreachs))[type-1]++;
ICMP6_STATS_PTR_BH(Icmp6OutDestUnreachs) [type-1]++;
ICMP6_INC_STATS_BH(Icmp6OutMsgs);
out:
icmpv6_xmit_unlock();
......@@ -539,9 +539,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
type = hdr->icmp6_type;
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
(&icmpv6_statistics[smp_processor_id()*2].Icmp6InDestUnreachs)[type-ICMPV6_DEST_UNREACH]++;
ICMP6_STATS_PTR_BH(Icmp6InDestUnreachs)[type-ICMPV6_DEST_UNREACH]++;
else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
(&icmpv6_statistics[smp_processor_id()*2].Icmp6InEchos)[type-ICMPV6_ECHO_REQUEST]++;
ICMP6_STATS_PTR_BH(Icmp6InEchos)[type-ICMPV6_ECHO_REQUEST]++;
switch (type) {
case ICMPV6_ECHO_REQUEST:
......
......@@ -51,7 +51,7 @@
#include <asm/uaccess.h>
struct ipv6_mib ipv6_statistics[NR_CPUS*2];
DEFINE_SNMP_STAT(struct ipv6_mib, ipv6_statistics);
static struct packet_type ipv6_packet_type =
{
......
......@@ -59,11 +59,11 @@ int afinet6_get_info(char *buffer, char **start, off_t offset, int length)
static struct snmp6_item
{
char *name;
unsigned long *ptr;
int mibsize;
void **mib;
int offset;
} snmp6_list[] = {
/* ipv6 mib according to draft-ietf-ipngwg-ipv6-mib-04 */
#define SNMP6_GEN(x) { #x , &ipv6_statistics[0].x, sizeof(struct ipv6_mib)/sizeof(unsigned long) }
#define SNMP6_GEN(x) { #x , (void **)ipv6_statistics, offsetof(struct ipv6_mib, x) }
SNMP6_GEN(Ip6InReceives),
SNMP6_GEN(Ip6InHdrErrors),
SNMP6_GEN(Ip6InTooBigErrors),
......@@ -97,7 +97,7 @@ static struct snmp6_item
OutRouterAdvertisements too.
OutGroupMembQueries too.
*/
#define SNMP6_GEN(x) { #x , &icmpv6_statistics[0].x, sizeof(struct icmpv6_mib)/sizeof(unsigned long) }
#define SNMP6_GEN(x) { #x , (void **)icmpv6_statistics, offsetof(struct icmpv6_mib, x) }
SNMP6_GEN(Icmp6InMsgs),
SNMP6_GEN(Icmp6InErrors),
SNMP6_GEN(Icmp6InDestUnreachs),
......@@ -127,7 +127,7 @@ static struct snmp6_item
SNMP6_GEN(Icmp6OutGroupMembResponses),
SNMP6_GEN(Icmp6OutGroupMembReductions),
#undef SNMP6_GEN
#define SNMP6_GEN(x) { "Udp6" #x , &udp_stats_in6[0].Udp##x, sizeof(struct udp_mib)/sizeof(unsigned long) }
#define SNMP6_GEN(x) { "Udp6" #x , (void **)udp_stats_in6, offsetof(struct udp_mib, Udp##x) }
SNMP6_GEN(InDatagrams),
SNMP6_GEN(NoPorts),
SNMP6_GEN(InErrors),
......@@ -135,16 +135,22 @@ static struct snmp6_item
#undef SNMP6_GEN
};
static unsigned long fold_field(unsigned long *ptr, int size)
static unsigned long
fold_field(void *mib[], int offt)
{
unsigned long res = 0;
int i;
for (i=0; i<NR_CPUS; i++) {
res += ptr[2*i*size];
res += ptr[(2*i+1)*size];
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
res +=
*((unsigned long *) (((void *)per_cpu_ptr(mib[0], i)) +
offt));
res +=
*((unsigned long *) (((void *)per_cpu_ptr(mib[1], i)) +
offt));
}
return res;
}
......@@ -155,7 +161,7 @@ int afinet6_get_snmp(char *buffer, char **start, off_t offset, int length)
for (i=0; i<sizeof(snmp6_list)/sizeof(snmp6_list[0]); i++)
len += sprintf(buffer+len, "%-32s\t%ld\n", snmp6_list[i].name,
fold_field(snmp6_list[i].ptr, snmp6_list[i].mibsize));
fold_field(snmp6_list[i].mib, snmp6_list[i].offset));
len -= offset;
......
......@@ -51,7 +51,7 @@
#include <net/checksum.h>
struct udp_mib udp_stats_in6[NR_CPUS*2];
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
/* XXX This is identical to tcp_ipv6.c:ipv6_rcv_saddr_equal, put
* XXX it somewhere common. -DaveM
......
......@@ -60,7 +60,7 @@
/* Global data structures. */
sctp_protocol_t sctp_proto;
struct proc_dir_entry *proc_net_sctp;
struct sctp_mib sctp_statistics[NR_CPUS * 2];
DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
/* This is the global socket data structure used for responding to
* the Out-of-the-blue (OOTB) packets. A control sock will be created
......@@ -653,6 +653,40 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
return 1;
}
static int __init init_sctp_mibs(void)
{
int i;
sctp_statistics[0] = kmalloc_percpu(sizeof (struct sctp_mib),
GFP_KERNEL);
if (!sctp_statistics[0])
return -ENOMEM;
sctp_statistics[1] = kmalloc_percpu(sizeof (struct sctp_mib),
GFP_KERNEL);
if (!sctp_statistics[1]) {
kfree_percpu(sctp_statistics[0]);
return -ENOMEM;
}
/* Zero all percpu versions of the mibs */
for (i = 0; i < NR_CPUS; i++) {
if (cpu_possible(i)) {
memset(per_cpu_ptr(sctp_statistics[0], i), 0,
sizeof (struct sctp_mib));
memset(per_cpu_ptr(sctp_statistics[1], i), 0,
sizeof (struct sctp_mib));
}
}
return 0;
}
static void cleanup_sctp_mibs(void)
{
kfree_percpu(sctp_statistics[0]);
kfree_percpu(sctp_statistics[1]);
}
/* Initialize the universe into something sensible. */
int sctp_init(void)
{
......@@ -666,6 +700,11 @@ int sctp_init(void)
/* Add SCTP to inetsw linked list. */
inet_register_protosw(&sctp_protosw);
/* Allocate and initialise sctp mibs. */
status = init_sctp_mibs();
if (status)
goto err_init_mibs;
/* Initialize proc fs directory. */
sctp_proc_init();
......@@ -805,6 +844,8 @@ int sctp_init(void)
err_ahash_alloc:
sctp_dbg_objcnt_exit();
sctp_proc_exit();
cleanup_sctp_mibs();
err_init_mibs:
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
inet_unregister_protosw(&sctp_protosw);
return status;
......@@ -836,6 +877,7 @@ void sctp_exit(void)
sctp_dbg_objcnt_exit();
sctp_proc_exit();
cleanup_sctp_mibs();
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
inet_unregister_protosw(&sctp_protosw);
......
......@@ -75,7 +75,6 @@
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/wireless.h>
#include <linux/divert.h>
#include <linux/mount.h>
......@@ -189,10 +188,7 @@ static __inline__ void net_family_read_unlock(void)
* Statistics counters of the socket lists
*/
static union {
int counter;
char __pad[SMP_CACHE_BYTES];
} sockets_in_use[NR_CPUS] __cacheline_aligned = {{0}};
static DEFINE_PER_CPU(int, sockets_in_use) = 0;
/*
* Support routines. Move socket addresses back and forth across the kernel/user
......@@ -475,7 +471,8 @@ struct socket *sock_alloc(void)
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
sockets_in_use[smp_processor_id()].counter++;
get_cpu_var(sockets_in_use)++;
put_cpu_var(sockets_in_use);
return sock;
}
......@@ -511,7 +508,8 @@ void sock_release(struct socket *sock)
if (sock->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
sockets_in_use[smp_processor_id()].counter--;
get_cpu_var(sockets_in_use)--;
put_cpu_var(sockets_in_use);
if (!sock->file) {
iput(SOCK_INODE(sock));
return;
......@@ -1851,7 +1849,7 @@ void socket_seq_show(struct seq_file *seq)
int counter = 0;
for (cpu = 0; cpu < NR_CPUS; cpu++)
counter += sockets_in_use[cpu].counter;
counter += per_cpu(sockets_in_use, cpu);
/* It can be negative, by the way. 8) */
if (counter < 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment