Commit 22e0f8b9 authored by John Fastabend's avatar John Fastabend Committed by David S. Miller

net: sched: make bstats per cpu and estimator RCU safe

In order to run qdiscs without locking, statistics and estimators
need to be handled correctly.

To resolve bstats, make the statistics per-CPU. Because this is
only needed for qdiscs that run without locks — which will not be
the case for most qdiscs in the near future — only create per-CPU
stats when a qdisc sets the TCQ_F_CPUSTATS flag.

Next, because estimators use the bstats to calculate packets per
second and bytes per second, the estimator code paths are updated
to use the per-CPU statistics.
Signed-off-by: default avatarJohn Fastabend <john.r.fastabend@intel.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 79cf79ab
...@@ -6,6 +6,11 @@ ...@@ -6,6 +6,11 @@
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/pkt_sched.h> #include <linux/pkt_sched.h>
/* Per-CPU basic statistics block: one byte/packet counter pair per CPU,
 * paired with a u64_stats_sync sequence counter so readers can obtain a
 * consistent snapshot of the 64-bit byte counter on 32-bit hosts.
 */
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats; /* bytes/packets for this CPU */
struct u64_stats_sync syncp;           /* guards reads of bstats */
};
struct gnet_dump { struct gnet_dump {
spinlock_t * lock; spinlock_t * lock;
struct sk_buff * skb; struct sk_buff * skb;
...@@ -27,7 +32,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, ...@@ -27,7 +32,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d); spinlock_t *lock, struct gnet_dump *d);
int gnet_stats_copy_basic(struct gnet_dump *d, int gnet_stats_copy_basic(struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b); struct gnet_stats_basic_packed *b);
void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d, int gnet_stats_copy_rate_est(struct gnet_dump *d,
const struct gnet_stats_basic_packed *b, const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est64 *r); struct gnet_stats_rate_est64 *r);
...@@ -37,11 +46,13 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); ...@@ -37,11 +46,13 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d); int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats, int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est, struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt); spinlock_t *stats_lock, struct nlattr *opt);
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est); struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est, struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt); spinlock_t *stats_lock, struct nlattr *opt);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/pkt_sched.h> #include <linux/pkt_sched.h>
#include <linux/pkt_cls.h> #include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <net/gen_stats.h> #include <net/gen_stats.h>
#include <net/rtnetlink.h> #include <net/rtnetlink.h>
...@@ -58,6 +59,7 @@ struct Qdisc { ...@@ -58,6 +59,7 @@ struct Qdisc {
* multiqueue device. * multiqueue device.
*/ */
#define TCQ_F_WARN_NONWC (1 << 16) #define TCQ_F_WARN_NONWC (1 << 16)
#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
u32 limit; u32 limit;
const struct Qdisc_ops *ops; const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab; struct qdisc_size_table __rcu *stab;
...@@ -83,7 +85,10 @@ struct Qdisc { ...@@ -83,7 +85,10 @@ struct Qdisc {
*/ */
unsigned long state; unsigned long state;
struct sk_buff_head q; struct sk_buff_head q;
struct gnet_stats_basic_packed bstats; union {
struct gnet_stats_basic_packed bstats;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
} __packed;
unsigned int __state; unsigned int __state;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
struct rcu_head rcu_head; struct rcu_head rcu_head;
...@@ -487,6 +492,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) ...@@ -487,6 +492,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
} }
/* qdisc_is_percpu_stats - report whether @q keeps its basic statistics
 * in per-CPU storage, i.e. whether the TCQ_F_CPUSTATS flag is set and
 * q->cpu_bstats (rather than q->bstats) is the live member of the union.
 */
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return (q->flags & TCQ_F_CPUSTATS) != 0;
}
static inline void bstats_update(struct gnet_stats_basic_packed *bstats, static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
const struct sk_buff *skb) const struct sk_buff *skb)
...@@ -495,6 +504,17 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats, ...@@ -495,6 +504,17 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
} }
/* qdisc_bstats_update_cpu - account @skb into this CPU's basic stats.
 * Only valid for qdiscs with TCQ_F_CPUSTATS set (sch->cpu_bstats live).
 * The u64_stats_update_begin/end pair makes the 64-bit byte counter
 * update appear atomic to lockless readers on 32-bit hosts.
 * NOTE(review): caller is presumably in a context where this_cpu_ptr()
 * is safe (preemption disabled / softirq) — confirm at call sites.
 */
static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
const struct sk_buff *skb)
{
/* per-CPU counter slot for the executing CPU */
struct gnet_stats_basic_cpu *bstats =
this_cpu_ptr(sch->cpu_bstats);
u64_stats_update_begin(&bstats->syncp);
bstats_update(&bstats->bstats, skb);
u64_stats_update_end(&bstats->syncp);
}
static inline void qdisc_bstats_update(struct Qdisc *sch, static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
......
...@@ -91,6 +91,8 @@ struct gen_estimator ...@@ -91,6 +91,8 @@ struct gen_estimator
u32 avpps; u32 avpps;
struct rcu_head e_rcu; struct rcu_head e_rcu;
struct rb_node node; struct rb_node node;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct rcu_head head;
}; };
struct gen_estimator_head struct gen_estimator_head
...@@ -115,9 +117,8 @@ static void est_timer(unsigned long arg) ...@@ -115,9 +117,8 @@ static void est_timer(unsigned long arg)
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(e, &elist[idx].list, list) { list_for_each_entry_rcu(e, &elist[idx].list, list) {
u64 nbytes; struct gnet_stats_basic_packed b = {0};
u64 brate; u64 brate;
u32 npackets;
u32 rate; u32 rate;
spin_lock(e->stats_lock); spin_lock(e->stats_lock);
...@@ -125,15 +126,15 @@ static void est_timer(unsigned long arg) ...@@ -125,15 +126,15 @@ static void est_timer(unsigned long arg)
if (e->bstats == NULL) if (e->bstats == NULL)
goto skip; goto skip;
nbytes = e->bstats->bytes; __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
npackets = e->bstats->packets;
brate = (nbytes - e->last_bytes)<<(7 - idx); brate = (b.bytes - e->last_bytes)<<(7 - idx);
e->last_bytes = nbytes; e->last_bytes = b.bytes;
e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
e->rate_est->bps = (e->avbps+0xF)>>5; e->rate_est->bps = (e->avbps+0xF)>>5;
rate = (npackets - e->last_packets)<<(12 - idx); rate = (b.packets - e->last_packets)<<(12 - idx);
e->last_packets = npackets; e->last_packets = b.packets;
e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
e->rate_est->pps = (e->avpps+0x1FF)>>10; e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip: skip:
...@@ -203,12 +204,14 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats ...@@ -203,12 +204,14 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
* *
*/ */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats, int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est, struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, spinlock_t *stats_lock,
struct nlattr *opt) struct nlattr *opt)
{ {
struct gen_estimator *est; struct gen_estimator *est;
struct gnet_estimator *parm = nla_data(opt); struct gnet_estimator *parm = nla_data(opt);
struct gnet_stats_basic_packed b = {0};
int idx; int idx;
if (nla_len(opt) < sizeof(*parm)) if (nla_len(opt) < sizeof(*parm))
...@@ -221,15 +224,18 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, ...@@ -221,15 +224,18 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
if (est == NULL) if (est == NULL)
return -ENOBUFS; return -ENOBUFS;
__gnet_stats_copy_basic(&b, cpu_bstats, bstats);
idx = parm->interval + 2; idx = parm->interval + 2;
est->bstats = bstats; est->bstats = bstats;
est->rate_est = rate_est; est->rate_est = rate_est;
est->stats_lock = stats_lock; est->stats_lock = stats_lock;
est->ewma_log = parm->ewma_log; est->ewma_log = parm->ewma_log;
est->last_bytes = bstats->bytes; est->last_bytes = b.bytes;
est->avbps = rate_est->bps<<5; est->avbps = rate_est->bps<<5;
est->last_packets = bstats->packets; est->last_packets = b.packets;
est->avpps = rate_est->pps<<10; est->avpps = rate_est->pps<<10;
est->cpu_bstats = cpu_bstats;
spin_lock_bh(&est_tree_lock); spin_lock_bh(&est_tree_lock);
if (!elist[idx].timer.function) { if (!elist[idx].timer.function) {
...@@ -290,11 +296,12 @@ EXPORT_SYMBOL(gen_kill_estimator); ...@@ -290,11 +296,12 @@ EXPORT_SYMBOL(gen_kill_estimator);
* Returns 0 on success or a negative error code. * Returns 0 on success or a negative error code.
*/ */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est, struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt) spinlock_t *stats_lock, struct nlattr *opt)
{ {
gen_kill_estimator(bstats, rate_est); gen_kill_estimator(bstats, rate_est);
return gen_new_estimator(bstats, rate_est, stats_lock, opt); return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
} }
EXPORT_SYMBOL(gen_replace_estimator); EXPORT_SYMBOL(gen_replace_estimator);
......
...@@ -97,6 +97,43 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, ...@@ -97,6 +97,43 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
} }
EXPORT_SYMBOL(gnet_stats_start_copy); EXPORT_SYMBOL(gnet_stats_start_copy);
/* __gnet_stats_copy_basic_cpu - fold per-CPU basic stats into @bstats.
 * @bstats: accumulator (caller-initialized, typically zeroed)
 * @cpu: per-CPU counter array allocated with alloc_percpu()
 *
 * Each CPU's byte/packet pair is sampled inside a u64_stats retry loop
 * so the two values are mutually consistent and the 64-bit byte count
 * cannot be read torn on 32-bit hosts.
 */
static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		__u64 bytes;
		__u32 packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		/* Accumulate the snapshot taken under the seqcount above.
		 * The original code re-read bcpu->bstats here without
		 * synchronization, discarding the snapshot and allowing a
		 * torn 64-bit read on 32-bit hosts.
		 */
		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}
/* __gnet_stats_copy_basic - snapshot basic stats into @bstats.
 * @bstats: destination accumulator
 * @cpu: per-CPU counters, or NULL when the qdisc uses scalar counters
 * @b: scalar counters, used only when @cpu is NULL
 *
 * Dispatches to the per-CPU summation when percpu stats are in use,
 * otherwise copies the plain byte/packet counters from @b.
 */
void
__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	if (!cpu) {
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
		return;
	}
	__gnet_stats_copy_basic_cpu(bstats, cpu);
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
/** /**
* gnet_stats_copy_basic - copy basic statistics into statistic TLV * gnet_stats_copy_basic - copy basic statistics into statistic TLV
* @d: dumping handle * @d: dumping handle
...@@ -109,19 +146,25 @@ EXPORT_SYMBOL(gnet_stats_start_copy); ...@@ -109,19 +146,25 @@ EXPORT_SYMBOL(gnet_stats_start_copy);
* if the room in the socket buffer was not sufficient. * if the room in the socket buffer was not sufficient.
*/ */
int int
gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b) gnet_stats_copy_basic(struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
{ {
struct gnet_stats_basic_packed bstats = {0};
__gnet_stats_copy_basic(&bstats, cpu, b);
if (d->compat_tc_stats) { if (d->compat_tc_stats) {
d->tc_stats.bytes = b->bytes; d->tc_stats.bytes = bstats.bytes;
d->tc_stats.packets = b->packets; d->tc_stats.packets = bstats.packets;
} }
if (d->tail) { if (d->tail) {
struct gnet_stats_basic sb; struct gnet_stats_basic sb;
memset(&sb, 0, sizeof(sb)); memset(&sb, 0, sizeof(sb));
sb.bytes = b->bytes; sb.bytes = bstats.bytes;
sb.packets = b->packets; sb.packets = bstats.packets;
return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb)); return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
} }
return 0; return 0;
......
...@@ -136,7 +136,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) ...@@ -136,7 +136,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
cfg.est.interval = info->interval; cfg.est.interval = info->interval;
cfg.est.ewma_log = info->ewma_log; cfg.est.ewma_log = info->ewma_log;
ret = gen_new_estimator(&est->bstats, &est->rstats, ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
&est->lock, &cfg.opt); &est->lock, &cfg.opt);
if (ret < 0) if (ret < 0)
goto err2; goto err2;
......
...@@ -252,7 +252,8 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, ...@@ -252,7 +252,8 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
p->tcfc_tm.install = jiffies; p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies; p->tcfc_tm.lastuse = jiffies;
if (est) { if (est) {
int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, int err = gen_new_estimator(&p->tcfc_bstats, NULL,
&p->tcfc_rate_est,
&p->tcfc_lock, est); &p->tcfc_lock, est);
if (err) { if (err) {
kfree(p); kfree(p);
...@@ -619,7 +620,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, ...@@ -619,7 +620,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0) if (err < 0)
goto errout; goto errout;
if (gnet_stats_copy_basic(&d, &p->tcfc_bstats) < 0 || if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats, gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 || &p->tcfc_rate_est) < 0 ||
gnet_stats_copy_queue(&d, &p->tcfc_qstats) < 0) gnet_stats_copy_queue(&d, &p->tcfc_qstats) < 0)
......
...@@ -178,7 +178,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, ...@@ -178,7 +178,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
spin_lock_bh(&police->tcf_lock); spin_lock_bh(&police->tcf_lock);
if (est) { if (est) {
err = gen_replace_estimator(&police->tcf_bstats, err = gen_replace_estimator(&police->tcf_bstats, NULL,
&police->tcf_rate_est, &police->tcf_rate_est,
&police->tcf_lock, est); &police->tcf_lock, est);
if (err) if (err)
......
...@@ -942,6 +942,13 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, ...@@ -942,6 +942,13 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
sch->handle = handle; sch->handle = handle;
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
if (qdisc_is_percpu_stats(sch)) {
sch->cpu_bstats =
alloc_percpu(struct gnet_stats_basic_cpu);
if (!sch->cpu_bstats)
goto err_out4;
}
if (tca[TCA_STAB]) { if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB]); stab = qdisc_get_stab(tca[TCA_STAB]);
if (IS_ERR(stab)) { if (IS_ERR(stab)) {
...@@ -964,8 +971,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, ...@@ -964,8 +971,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
else else
root_lock = qdisc_lock(sch); root_lock = qdisc_lock(sch);
err = gen_new_estimator(&sch->bstats, &sch->rate_est, err = gen_new_estimator(&sch->bstats,
root_lock, tca[TCA_RATE]); sch->cpu_bstats,
&sch->rate_est,
root_lock,
tca[TCA_RATE]);
if (err) if (err)
goto err_out4; goto err_out4;
} }
...@@ -984,6 +994,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, ...@@ -984,6 +994,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
return NULL; return NULL;
err_out4: err_out4:
free_percpu(sch->cpu_bstats);
/* /*
* Any broken qdiscs that would require a ops->reset() here? * Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary. * The qdisc was never in action so it shouldn't be necessary.
...@@ -1022,9 +1033,11 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) ...@@ -1022,9 +1033,11 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
because change can't be undone. */ because change can't be undone. */
if (sch->flags & TCQ_F_MQROOT) if (sch->flags & TCQ_F_MQROOT)
goto out; goto out;
gen_replace_estimator(&sch->bstats, &sch->rate_est, gen_replace_estimator(&sch->bstats,
qdisc_root_sleeping_lock(sch), sch->cpu_bstats,
tca[TCA_RATE]); &sch->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
} }
out: out:
return 0; return 0;
...@@ -1299,6 +1312,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n) ...@@ -1299,6 +1312,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event) u32 portid, u32 seq, u16 flags, int event)
{ {
struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
struct tcmsg *tcm; struct tcmsg *tcm;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
...@@ -1334,7 +1348,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, ...@@ -1334,7 +1348,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
goto nla_put_failure; goto nla_put_failure;
if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || if (qdisc_is_percpu_stats(q))
cpu_bstats = q->cpu_bstats;
if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
gnet_stats_copy_queue(&d, &q->qstats) < 0) gnet_stats_copy_queue(&d, &q->qstats) < 0)
goto nla_put_failure; goto nla_put_failure;
......
...@@ -639,7 +639,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -639,7 +639,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
flow->qstats.qlen = flow->q->q.qlen; flow->qstats.qlen = flow->q->q.qlen;
if (gnet_stats_copy_basic(d, &flow->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
gnet_stats_copy_queue(d, &flow->qstats) < 0) gnet_stats_copy_queue(d, &flow->qstats) < 0)
return -1; return -1;
......
...@@ -1601,7 +1601,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -1601,7 +1601,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (cl->undertime != PSCHED_PASTPERFECT) if (cl->undertime != PSCHED_PASTPERFECT)
cl->xstats.undertime = cl->undertime - q->now; cl->xstats.undertime = cl->undertime - q->now;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0) gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1; return -1;
...@@ -1759,7 +1759,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t ...@@ -1759,7 +1759,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
} }
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est, err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) { if (err) {
...@@ -1852,7 +1853,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t ...@@ -1852,7 +1853,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
goto failure; goto failure;
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, &cl->rate_est, err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) { if (err) {
......
...@@ -88,7 +88,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -88,7 +88,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) { if (cl != NULL) {
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est, err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) if (err)
...@@ -116,7 +117,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -116,7 +117,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->qdisc = &noop_qdisc; cl->qdisc = &noop_qdisc;
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est, err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) { if (err) {
...@@ -282,7 +283,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -282,7 +283,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
} }
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
return -1; return -1;
......
...@@ -632,6 +632,9 @@ static void qdisc_rcu_free(struct rcu_head *head) ...@@ -632,6 +632,9 @@ static void qdisc_rcu_free(struct rcu_head *head)
{ {
struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head); struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
if (qdisc_is_percpu_stats(qdisc))
free_percpu(qdisc->cpu_bstats);
kfree((char *) qdisc - qdisc->padded); kfree((char *) qdisc - qdisc->padded);
} }
......
...@@ -1014,9 +1014,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -1014,9 +1014,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cur_time = psched_get_time(); cur_time = psched_get_time();
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est, spinlock_t *lock = qdisc_root_sleeping_lock(sch);
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
lock,
tca[TCA_RATE]);
if (err) if (err)
return err; return err;
} }
...@@ -1063,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -1063,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return -ENOBUFS; return -ENOBUFS;
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, &cl->rate_est, err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) { if (err) {
...@@ -1374,7 +1377,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -1374,7 +1377,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.work = cl->cl_total; xstats.work = cl->cl_total;
xstats.rtwork = cl->cl_cumul; xstats.rtwork = cl->cl_cumul;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0) gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1; return -1;
......
...@@ -1144,7 +1144,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) ...@@ -1144,7 +1144,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0) gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1; return -1;
...@@ -1402,7 +1402,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, ...@@ -1402,7 +1402,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto failure; goto failure;
if (htb_rate_est || tca[TCA_RATE]) { if (htb_rate_est || tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, &cl->rate_est, err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE] ? : &est.nla); tca[TCA_RATE] ? : &est.nla);
if (err) { if (err) {
...@@ -1464,8 +1465,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, ...@@ -1464,8 +1465,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
parent->children++; parent->children++;
} else { } else {
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est, spinlock_t *lock = qdisc_root_sleeping_lock(sch);
qdisc_root_sleeping_lock(sch),
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
lock,
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) if (err)
return err; return err;
......
...@@ -201,7 +201,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -201,7 +201,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping; sch = dev_queue->qdisc_sleeping;
sch->qstats.qlen = sch->q.qlen; sch->qstats.qlen = sch->q.qlen;
if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, &sch->qstats) < 0) gnet_stats_copy_queue(d, &sch->qstats) < 0)
return -1; return -1;
return 0; return 0;
......
...@@ -355,7 +355,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -355,7 +355,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
} }
/* Reclaim root sleeping lock before completing stats */ /* Reclaim root sleeping lock before completing stats */
spin_lock_bh(d->lock); spin_lock_bh(d->lock);
if (gnet_stats_copy_basic(d, &bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
gnet_stats_copy_queue(d, &qstats) < 0) gnet_stats_copy_queue(d, &qstats) < 0)
return -1; return -1;
} else { } else {
...@@ -363,7 +363,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -363,7 +363,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping; sch = dev_queue->qdisc_sleeping;
sch->qstats.qlen = sch->q.qlen; sch->qstats.qlen = sch->q.qlen;
if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, &sch->qstats) < 0) gnet_stats_copy_queue(d, &sch->qstats) < 0)
return -1; return -1;
} }
......
...@@ -361,7 +361,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -361,7 +361,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1]; cl_q = q->queues[cl - 1];
cl_q->qstats.qlen = cl_q->q.qlen; cl_q->qstats.qlen = cl_q->q.qlen;
if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, &cl_q->qstats) < 0) gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
return -1; return -1;
......
...@@ -325,7 +325,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -325,7 +325,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1]; cl_q = q->queues[cl - 1];
cl_q->qstats.qlen = cl_q->q.qlen; cl_q->qstats.qlen = cl_q->q.qlen;
if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, &cl_q->qstats) < 0) gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
return -1; return -1;
......
...@@ -459,7 +459,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -459,7 +459,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) { /* modify existing class */ if (cl != NULL) { /* modify existing class */
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est, err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) if (err)
...@@ -484,7 +485,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -484,7 +485,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->qdisc = &noop_qdisc; cl->qdisc = &noop_qdisc;
if (tca[TCA_RATE]) { if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, &cl->rate_est, err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
qdisc_root_sleeping_lock(sch), qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]); tca[TCA_RATE]);
if (err) if (err)
...@@ -667,7 +669,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -667,7 +669,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.weight = cl->agg->class_weight; xstats.weight = cl->agg->class_weight;
xstats.lmax = cl->agg->lmax; xstats.lmax = cl->agg->lmax;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
return -1; return -1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment