Commit 276ca536 authored by Linus Torvalds

Merge http://lia64.bkbits.net/linux-ia64-release-2.6.10

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 148b43d6 cde1321b
......@@ -275,7 +275,8 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
"bad page in #%lu",
inode->i_ino);
filp->f_pos += PAGE_CACHE_SIZE - offset;
continue;
ret = -EIO;
goto done;
}
kaddr = page_address(page);
if (need_revalidate) {
......
......@@ -109,6 +109,8 @@ struct divert_cf
#include <linux/skbuff.h>
#ifdef CONFIG_NET_DIVERT
#include <linux/netdevice.h>
int alloc_divert_blk(struct net_device *);
void free_divert_blk(struct net_device *);
int divert_ioctl(unsigned int cmd, struct divert_cf __user *arg);
......
......@@ -6,6 +6,9 @@
#define __LINUX_FILTER_H__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic.h>
/*
* Current version of the filter code architecture.
......@@ -135,6 +138,9 @@ static inline unsigned int sk_filter_len(struct sk_filter *fp)
#define SKF_LL_OFF (-0x200000)
#ifdef __KERNEL__
struct sk_buff;
struct sock;
extern int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, int flen);
......
......@@ -17,6 +17,8 @@
#ifndef _LINUX_ICMP_H
#define _LINUX_ICMP_H
#include <linux/types.h>
#define ICMP_ECHOREPLY 0 /* Echo Reply */
#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */
#define ICMP_SOURCE_QUENCH 4 /* Source Quench */
......
......@@ -43,6 +43,8 @@
#define _LINUX_INET_H
#ifdef __KERNEL__
#include <linux/types.h>
extern __u32 in_aton(const char *str);
#endif
#endif /* _LINUX_INET_H */
......@@ -3,7 +3,10 @@
#ifdef __KERNEL__
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
struct ipv4_devconf
{
......
......@@ -699,6 +699,7 @@ enum
TCA_RATE,
TCA_FCNT,
TCA_STATS2,
TCA_ACT_STATS,
__TCA_MAX
};
......
......@@ -28,7 +28,9 @@ struct tcf_police
struct qdisc_rate_table *R_tab;
struct qdisc_rate_table *P_tab;
struct tc_stats stats;
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
spinlock_t *stats_lock;
};
......@@ -44,10 +46,16 @@ struct tcf_##name *next; \
u32 capab; \
int action; \
struct tcf_t tm; \
struct tc_stats stats; \
struct gnet_stats_basic bstats; \
struct gnet_stats_queue qstats; \
struct gnet_stats_rate_est rate_est; \
spinlock_t *stats_lock; \
spinlock_t lock
struct tcf_act_hdr
{
tca_gen(act_hdr);
};
struct tc_action
{
......@@ -95,6 +103,7 @@ extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st, spinlock_t
extern void tcf_police_destroy(struct tcf_police *p);
extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est);
extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
extern int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p);
static inline int
tcf_police_release(struct tcf_police *p, int bind)
......
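For orientation, a rough sketch (not part of the patch) of what a tca_gen()-generated header such as struct tcf_act_hdr carries after this change; only the members visible in the hunk above are shown, and the macro's leading members are elided:

    struct tcf_act_hdr {
            /* ... leading tca_gen() members not shown in this hunk ... */
            u32                             capab;
            int                             action;
            struct tcf_t                    tm;
            struct gnet_stats_basic         bstats;         /* bytes, packets */
            struct gnet_stats_queue         qstats;         /* drops, overlimits, ... */
            struct gnet_stats_rate_est      rate_est;       /* estimated bps, pps */
            spinlock_t                      *stats_lock;    /* protects the counters above */
            spinlock_t                      lock;
    };

Splitting the old struct tc_stats into bstats/qstats/rate_est is what lets the generic gnet_stats_* dump helpers used later in this diff replace the per-action get_stats callbacks.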
......@@ -198,6 +198,7 @@
/***************************** INCLUDES *****************************/
#include <linux/wireless.h> /* IOCTL user space API */
#include <linux/if_ether.h>
/***************************** VERSION *****************************/
/*
......@@ -294,6 +295,8 @@ struct iw_request_info
__u16 flags; /* More to come ;-) */
};
struct net_device;
/*
* This is how a function handling a Wireless Extension should look
* like (both get and set, standard and private).
......
......@@ -189,7 +189,7 @@ struct neigh_table
struct timer_list gc_timer;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
int entries;
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
struct neigh_parms *parms_list;
......
......@@ -60,7 +60,7 @@ tcf_hash_destroy(struct tcf_st *p)
*p1p = p->next;
write_unlock_bh(&tcf_t_lock);
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&p->stats);
gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
kfree(p);
return;
......@@ -256,9 +256,8 @@ tcf_hash_create(struct tc_st *parm, struct rtattr *est, struct tc_action *a, int
p->tm.install = jiffies;
p->tm.lastuse = jiffies;
#ifdef CONFIG_NET_ESTIMATOR
if (est) {
qdisc_new_estimator(&p->stats, p->stats_lock, est);
}
if (est)
gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
h = tcf_hash(p->index);
write_lock_bh(&tcf_t_lock);
......
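A minimal sketch of the estimator pairing the hunk above converts to, assuming a hypothetical struct my_entry that carries the same generic-stats members; the gen_new_estimator()/gen_kill_estimator() signatures match the calls in the diff:

    #include <net/gen_stats.h>

    struct my_entry {                               /* hypothetical */
            struct gnet_stats_basic         bstats;
            struct gnet_stats_rate_est      rate_est;
            spinlock_t                      *stats_lock;
    };

    static void my_entry_est_start(struct my_entry *p, struct rtattr *est)
    {
    #ifdef CONFIG_NET_ESTIMATOR
            if (est)        /* optional rate-estimator attribute */
                    gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
    #endif
    }

    static void my_entry_est_stop(struct my_entry *p)
    {
    #ifdef CONFIG_NET_ESTIMATOR
            gen_kill_estimator(&p->bstats, &p->rate_est);   /* paired with _start */
    #endif
    }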
......@@ -1261,11 +1261,6 @@ int dev_queue_xmit(struct sk_buff *skb)
struct Qdisc *q;
int rc = -ENOMEM;
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
local_bh_disable();
if (skb_shinfo(skb)->frag_list &&
!(dev->features & NETIF_F_FRAGLIST) &&
__skb_linearize(skb, GFP_ATOMIC))
......@@ -1290,6 +1285,11 @@ int dev_queue_xmit(struct sk_buff *skb)
if (skb_checksum_help(skb, 0))
goto out_kfree_skb;
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
local_bh_disable();
/* Updates of qdisc are serialized by queue_lock.
* The struct Qdisc which is pointed to by qdisc is now a
* rcu structure - it may be accessed without acquiring
......@@ -1352,7 +1352,6 @@ int dev_queue_xmit(struct sk_buff *skb)
if (net_ratelimit())
printk(KERN_CRIT "Virtual device %s asks to "
"queue packet!\n", dev->name);
goto out_enetdown;
} else {
/* Recursion is detected! It is possible,
* unfortunately */
......@@ -1361,10 +1360,13 @@ int dev_queue_xmit(struct sk_buff *skb)
"%s, fix it urgently!\n", dev->name);
}
}
out_enetdown:
rc = -ENETDOWN;
local_bh_enable();
out_kfree_skb:
kfree_skb(skb);
return rc;
out:
local_bh_enable();
return rc;
......@@ -2374,6 +2376,9 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return dev_set_mtu(dev, ifr->ifr_mtu);
case SIOCGIFHWADDR:
if (!dev->addr_len)
memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
else
memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
ifr->ifr_hwaddr.sa_family = dev->type;
......
......@@ -125,6 +125,7 @@ gnet_stats_finish_copy(struct gnet_dump *d)
EXPORT_SYMBOL(gnet_stats_start_copy);
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
EXPORT_SYMBOL(gnet_stats_copy_basic);
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
EXPORT_SYMBOL(gnet_stats_copy_queue);
......
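The helpers exported above are intended to be chained; a minimal sketch of the sequence (mirroring tcf_action_copy_stats() and tcf_police_dump_stats() elsewhere in this diff), where MY_ATTR and the my_entry fields are hypothetical:

    #include <net/gen_stats.h>

    static int my_dump_stats(struct sk_buff *skb, struct my_entry *p)
    {
            struct gnet_dump d;

            /* start a stats TLV, holding p->stats_lock for the duration */
            if (gnet_stats_start_copy(skb, MY_ATTR, p->stats_lock, &d) < 0)
                    goto errout;

            if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
    #ifdef CONFIG_NET_ESTIMATOR
                gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
    #endif
                gnet_stats_copy_queue(&d, &p->qstats) < 0)
                    goto errout;

            /* close the TLV and release the lock */
            if (gnet_stats_finish_copy(&d) < 0)
                    goto errout;
            return 0;

    errout:
            return -1;
    }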
......@@ -254,18 +254,20 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
if (tbl->entries > tbl->gc_thresh3 ||
(tbl->entries > tbl->gc_thresh2 &&
entries = atomic_inc_return(&tbl->entries) - 1;
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
tbl->entries > tbl->gc_thresh3)
goto out;
entries >= tbl->gc_thresh3)
goto out_entries;
}
n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
if (!n)
goto out;
goto out_entries;
memset(n, 0, tbl->entry_size);
......@@ -281,12 +283,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
NEIGH_CACHE_STAT_INC(tbl, allocs);
neigh_glbl_allocs++;
tbl->entries++;
n->tbl = tbl;
atomic_set(&n->refcnt, 1);
n->dead = 1;
out:
return n;
out_entries:
atomic_dec(&tbl->entries);
goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
......@@ -427,7 +432,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
write_lock_bh(&tbl->lock);
if (tbl->entries > (tbl->hash_mask + 1))
if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
......@@ -608,7 +613,7 @@ void neigh_destroy(struct neighbour *neigh)
NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
neigh_glbl_allocs--;
neigh->tbl->entries--;
atomic_dec(&neigh->tbl->entries);
kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
......@@ -1394,7 +1399,7 @@ int neigh_table_clear(struct neigh_table *tbl)
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
if (tbl->entries)
if (atomic_read(&tbl->entries))
printk(KERN_CRIT "neighbour leakage\n");
write_lock(&neigh_tbl_lock);
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
......@@ -1951,7 +1956,7 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
"%08lx %08lx %08lx %08lx\n",
tbl->entries,
atomic_read(&tbl->entries),
st->allocs,
st->destroys,
......
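The neigh_alloc() change above reserves an entry with atomic_inc_return() and rolls the counter back on every failure path; a condensed sketch of the pattern with hypothetical my_* names:

    #include <linux/slab.h>
    #include <asm/atomic.h>

    struct my_table {                       /* hypothetical */
            atomic_t        entries;
            int             limit;
            kmem_cache_t    *cachep;
    };

    static void *my_alloc(struct my_table *tbl)
    {
            void *obj = NULL;
            int entries;

            /* post-increment value minus one == value seen before this call */
            entries = atomic_inc_return(&tbl->entries) - 1;
            if (entries >= tbl->limit)
                    goto out_entries;

            obj = kmem_cache_alloc(tbl->cachep, SLAB_ATOMIC);
            if (!obj)
                    goto out_entries;
    out:
            return obj;
    out_entries:
            atomic_dec(&tbl->entries);      /* undo the reservation */
            goto out;
    }

Because the counter is bumped before the limit check, concurrent allocators each see the slots already reserved by the others, and the matching atomic_dec() in the destroy path keeps the count balanced.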
......@@ -351,6 +351,7 @@ config INET_TUNNEL
config IP_TCPDIAG
tristate "IP: TCP socket monitoring interface"
depends on INET
default y
---help---
Support for TCP socket monitoring interface used by native Linux
......@@ -358,5 +359,8 @@ config IP_TCPDIAG
If unsure, say Y.
config IP_TCPDIAG_IPV6
def_bool (IP_TCPDIAG=y && IPV6=y) || (IP_TCPDIAG=m && IPV6)
source "net/ipv4/ipvs/Kconfig"
......@@ -103,14 +103,12 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->tcpdiag_wqueue = 0;
r->tcpdiag_uid = 0;
r->tcpdiag_inode = 0;
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
&tw->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
&tw->tw_v6_daddr);
}
#endif
nlh->nlmsg_len = skb->tail - b;
return skb->len;
}
......@@ -120,7 +118,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->id.tcpdiag_src[0] = inet->rcv_saddr;
r->id.tcpdiag_dst[0] = inet->daddr;
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
......@@ -129,7 +126,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
&np->daddr);
}
#endif
#define EXPIRES_IN_MS(tmo) ((tmo-jiffies)*1000+HZ-1)/HZ
......@@ -188,11 +184,19 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
return -1;
}
extern struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
#ifdef CONFIG_IPV6
extern struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport,
int dif);
#ifdef CONFIG_IP_TCPDIAG_IPV6
extern struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
struct in6_addr *daddr, u16 dport,
int dif);
#else
static inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
struct in6_addr *daddr, u16 dport,
int dif)
{
return NULL;
}
#endif
static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
......@@ -207,13 +211,11 @@ static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
req->id.tcpdiag_src[0], req->id.tcpdiag_sport,
req->id.tcpdiag_if);
}
#ifdef CONFIG_IPV6
else if (req->tcpdiag_family == AF_INET6) {
sk = tcp_v6_lookup((struct in6_addr*)req->id.tcpdiag_dst, req->id.tcpdiag_dport,
(struct in6_addr*)req->id.tcpdiag_src, req->id.tcpdiag_sport,
req->id.tcpdiag_if);
}
#endif
else {
return -EINVAL;
}
......@@ -422,14 +424,12 @@ static int tcpdiag_dump_sock(struct sk_buff *skb, struct sock *sk,
struct inet_opt *inet = inet_sk(sk);
entry.family = sk->sk_family;
#ifdef CONFIG_IPV6
if (entry.family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
entry.saddr = np->rcv_saddr.s6_addr32;
entry.daddr = np->daddr.s6_addr32;
} else
#endif
{
entry.saddr = &inet->rcv_saddr;
entry.daddr = &inet->daddr;
......@@ -482,14 +482,12 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
r->tcpdiag_wqueue = 0;
r->tcpdiag_uid = sock_i_uid(sk);
r->tcpdiag_inode = 0;
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
&req->af.v6_req.loc_addr);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
&req->af.v6_req.rmt_addr);
}
#endif
nlh->nlmsg_len = skb->tail - b;
return skb->len;
......@@ -545,16 +543,12 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
if (bc) {
entry.saddr =
#ifdef CONFIG_IPV6
(entry.family == AF_INET6) ?
req->af.v6_req.loc_addr.s6_addr32 :
#endif
&req->af.v4_req.loc_addr;
entry.daddr =
#ifdef CONFIG_IPV6
(entry.family == AF_INET6) ?
req->af.v6_req.rmt_addr.s6_addr32 :
#endif
&req->af.v4_req.rmt_addr;
entry.dport = ntohs(req->rmt_port);
......
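The tcp_v6_lookup() declarations above follow a common pattern: when the option is compiled out, a static inline stub returns NULL, so callers can drop their #ifdef blocks and the compiler discards the now-dead IPv6 branches. A generic sketch with hypothetical names:

    #ifdef CONFIG_MY_FEATURE
    extern struct sock *my_feature_lookup(u32 addr, u16 port, int dif);
    #else
    static inline struct sock *my_feature_lookup(u32 addr, u16 port, int dif)
    {
            return NULL;    /* constant result lets the caller's branch fold away */
    }
    #endif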
......@@ -1078,13 +1078,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
neigh->flags |= NTF_ROUTER;
/*
* If we where using an "all destinations on link" route
* delete it
*/
rt6_purge_dflt_routers();
}
if (rt)
......
......@@ -416,14 +416,37 @@ int tcf_action_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
int tcf_action_copy_stats (struct sk_buff *skb,struct tc_action *a)
{
struct gnet_dump d;
struct tcf_act_hdr *h = a->priv;
#ifdef CONFIG_KMOD
/* place holder */
#endif
if (NULL == a->ops || NULL == a->ops->get_stats)
return 1;
if (NULL == h)
goto errout;
if (gnet_stats_start_copy(skb, TCA_ACT_STATS, h->stats_lock, &d) < 0)
goto errout;
if (NULL != a->ops && NULL != a->ops->get_stats)
if (a->ops->get_stats(skb, a) < 0)
goto errout;
if (gnet_stats_copy_basic(&d, &h->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
gnet_stats_copy_rate_est(&d, &h->rate_est) < 0 ||
#endif
gnet_stats_copy_queue(&d, &h->qstats) < 0)
goto errout;
return a->ops->get_stats(skb,a);
if (gnet_stats_finish_copy(&d) < 0)
goto errout;
return 0;
errout:
return -1;
}
......
......@@ -395,11 +395,9 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
}
#else /* CONFIG_NET_CLS_ACT */
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
if (qdisc_copy_stats(skb, &f->police->stats,
f->police->stats_lock))
if (f->police)
if (tcf_police_dump_stats(skb, f->police) < 0)
goto rtattr_failure;
}
#endif /* CONFIG_NET_CLS_POLICE */
#endif /* CONFIG_NET_CLS_ACT */
return skb->len;
......
......@@ -566,11 +566,9 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
if (qdisc_copy_stats(skb, &f->police->stats,
f->police->stats_lock))
if (f->police)
if (tcf_police_dump_stats(skb, f->police) < 0)
goto rtattr_failure;
}
#endif
return skb->len;
......
......@@ -631,11 +631,9 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
if (qdisc_copy_stats(skb, &f->police->stats,
f->police->stats_lock))
if (f->police)
if (tcf_police_dump_stats(skb, f->police) < 0)
goto rtattr_failure;
}
#endif
return skb->len;
......
......@@ -775,11 +775,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
}
#else
#ifdef CONFIG_NET_CLS_POLICE
if (TC_U32_KEY(n->handle) && n->police) {
if (qdisc_copy_stats(skb, &n->police->stats,
n->police->stats_lock))
if (TC_U32_KEY(n->handle) && n->police)
if (tcf_police_dump_stats(skb, n->police) < 0)
goto rtattr_failure;
}
#endif
#endif
return skb->len;
......
......@@ -62,7 +62,7 @@ gact_net_rand(struct tcf_gact *p) {
int
gact_determ(struct tcf_gact *p) {
if (p->stats.packets%p->pval)
if (p->bstats.packets%p->pval)
return p->action;
return p->paction;
}
......@@ -163,10 +163,10 @@ tcf_gact(struct sk_buff **pskb, struct tc_action *a)
#else
action = p->action;
#endif
p->stats.bytes += skb->len;
p->stats.packets++;
p->bstats.bytes += skb->len;
p->bstats.packets++;
if (TC_ACT_SHOT == action)
p->stats.drops++;
p->qstats.drops++;
p->tm.lastuse = jiffies;
spin_unlock(&p->lock);
......@@ -214,17 +214,6 @@ tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return -1;
}
int
tcf_gact_stats(struct sk_buff *skb, struct tc_action *a)
{
struct tcf_gact *p;
p = PRIV(a,gact);
if (NULL != p)
return qdisc_copy_stats(skb, &p->stats,p->stats_lock);
return 1;
}
struct tc_action_ops act_gact_ops = {
.next = NULL,
.kind = "gact",
......@@ -232,7 +221,6 @@ struct tc_action_ops act_gact_ops = {
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
.act = tcf_gact,
.get_stats = tcf_gact_stats,
.dump = tcf_gact_dump,
.cleanup = tcf_gact_cleanup,
.lookup = tcf_hash_search,
......
......@@ -218,9 +218,8 @@ tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, int ov
*/
p->tm.install = jiffies;
#ifdef CONFIG_NET_ESTIMATOR
if (est) {
qdisc_new_estimator(&p->stats, p->stats_lock, est);
}
if (est)
gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
h = tcf_hash(p->index);
write_lock_bh(&ipt_lock);
......@@ -258,8 +257,8 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a)
spin_lock(&p->lock);
p->tm.lastuse = jiffies;
p->stats.bytes += skb->len;
p->stats.packets++;
p->bstats.bytes += skb->len;
p->bstats.packets++;
if (skb_cloned(skb) ) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
......@@ -278,7 +277,7 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a)
break;
case NF_DROP:
result = TC_ACT_SHOT;
p->stats.drops++;
p->qstats.drops++;
break;
case IPT_CONTINUE:
result = TC_ACT_PIPE;
......@@ -346,17 +345,6 @@ tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return -1;
}
int
tcf_ipt_stats(struct sk_buff *skb, struct tc_action *a)
{
struct tcf_ipt *p;
p = PRIV(a,ipt);
if (NULL != p)
return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
return 1;
}
struct tc_action_ops act_ipt_ops = {
.next = NULL,
.kind = "ipt",
......@@ -364,7 +352,6 @@ struct tc_action_ops act_ipt_ops = {
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
.act = tcf_ipt,
.get_stats = tcf_ipt_stats,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
.lookup = tcf_hash_search,
......
......@@ -195,9 +195,9 @@ tcf_mirred(struct sk_buff **pskb, struct tc_action *a)
bad_mirred:
if (NULL != skb2)
kfree_skb(skb2);
p->stats.overlimits++;
p->stats.bytes += skb->len;
p->stats.packets++;
p->qstats.overlimits++;
p->bstats.bytes += skb->len;
p->bstats.packets++;
spin_unlock(&p->lock);
/* should we be asking for packet to be dropped?
* may make sense for redirect case only
......@@ -216,8 +216,8 @@ tcf_mirred(struct sk_buff **pskb, struct tc_action *a)
goto bad_mirred;
}
p->stats.bytes += skb2->len;
p->stats.packets++;
p->bstats.bytes += skb2->len;
p->bstats.packets++;
if ( !(at & AT_EGRESS)) {
if (p->ok_push) {
skb_push(skb2, skb2->dev->hard_header_len);
......@@ -268,18 +268,6 @@ tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
return -1;
}
int
tcf_mirred_stats(struct sk_buff *skb, struct tc_action *a)
{
struct tcf_mirred *p;
p = PRIV(a,mirred);
if (NULL != p)
return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
return 1;
}
static struct tc_action_ops act_mirred_ops = {
.next = NULL,
.kind = "mirred",
......@@ -287,7 +275,6 @@ static struct tc_action_ops act_mirred_ops = {
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
.act = tcf_mirred,
.get_stats = tcf_mirred_stats,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_cleanup,
.lookup = tcf_hash_search,
......
......@@ -183,10 +183,10 @@ tcf_pedit(struct sk_buff **pskb, struct tc_action *a)
}
bad:
p->stats.overlimits++;
p->qstats.overlimits++;
done:
p->stats.bytes += skb->len;
p->stats.packets++;
p->bstats.bytes += skb->len;
p->bstats.packets++;
spin_unlock(&p->lock);
return p->action;
}
......@@ -255,17 +255,6 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
return -1;
}
int
tcf_pedit_stats(struct sk_buff *skb, struct tc_action *a)
{
struct tcf_pedit *p;
p = PRIV(a,pedit);
if (NULL != p)
return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
return 1;
}
static
struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
......@@ -273,7 +262,6 @@ struct tc_action_ops act_pedit_ops = {
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
.act = tcf_pedit,
.get_stats = tcf_pedit_stats,
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
.lookup = tcf_hash_search,
......
......@@ -149,7 +149,7 @@ void tcf_police_destroy(struct tcf_police *p)
*p1p = p->next;
write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&p->stats);
gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
if (p->R_tab)
qdisc_put_rtab(p->R_tab);
......@@ -245,7 +245,7 @@ int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,struct tc_actio
p->index = parm->index ? : tcf_police_new_index();
#ifdef CONFIG_NET_ESTIMATOR
if (est)
qdisc_new_estimator(&p->stats, p->stats_lock, est);
gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
h = tcf_police_hash(p->index);
write_lock_bh(&police_lock);
......@@ -275,16 +275,6 @@ int tcf_act_police_cleanup(struct tc_action *a, int bind)
return 0;
}
int tcf_act_police_stats(struct sk_buff *skb, struct tc_action *a)
{
struct tcf_police *p;
p = PRIV(a);
if (NULL != p)
return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
return 1;
}
int tcf_act_police(struct sk_buff **pskb, struct tc_action *a)
{
psched_time_t now;
......@@ -302,12 +292,12 @@ int tcf_act_police(struct sk_buff **pskb, struct tc_action *a)
spin_lock(&p->lock);
p->stats.bytes += skb->len;
p->stats.packets++;
p->bstats.bytes += skb->len;
p->bstats.packets++;
#ifdef CONFIG_NET_ESTIMATOR
if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
p->stats.overlimits++;
if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
p->qstats.overlimits++;
spin_unlock(&p->lock);
return p->action;
}
......@@ -343,7 +333,7 @@ int tcf_act_police(struct sk_buff **pskb, struct tc_action *a)
}
}
p->stats.overlimits++;
p->qstats.overlimits++;
spin_unlock(&p->lock);
return p->action;
}
......@@ -400,7 +390,6 @@ static struct tc_action_ops act_police_ops = {
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
.act = tcf_act_police,
.get_stats = tcf_act_police_stats,
.dump = tcf_act_police_dump,
.cleanup = tcf_act_police_cleanup,
.lookup = tcf_hash_search,
......@@ -480,7 +469,7 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
p->action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
if (est)
qdisc_new_estimator(&p->stats, p->stats_lock, est);
gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
h = tcf_police_hash(p->index);
write_lock_bh(&police_lock);
......@@ -504,12 +493,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
spin_lock(&p->lock);
p->stats.bytes += skb->len;
p->stats.packets++;
p->bstats.bytes += skb->len;
p->bstats.packets++;
#ifdef CONFIG_NET_ESTIMATOR
if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
p->stats.overlimits++;
if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
p->qstats.overlimits++;
spin_unlock(&p->lock);
return p->action;
}
......@@ -545,7 +534,7 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
}
}
p->stats.overlimits++;
p->qstats.overlimits++;
spin_unlock(&p->lock);
return p->action;
}
......@@ -581,9 +570,34 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
return -1;
}
int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
{
struct gnet_dump d;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
TCA_XSTATS, p->stats_lock, &d) < 0)
goto errout;
if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
#endif
gnet_stats_copy_queue(&d, &p->qstats) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)
goto errout;
return 0;
errout:
return -1;
}
EXPORT_SYMBOL(tcf_police);
EXPORT_SYMBOL(tcf_police_destroy);
EXPORT_SYMBOL(tcf_police_dump);
EXPORT_SYMBOL(tcf_police_dump_stats);
EXPORT_SYMBOL(tcf_police_hash);
EXPORT_SYMBOL(tcf_police_ht);
EXPORT_SYMBOL(tcf_police_locate);
......