Commit b0ab6f92 authored by John Fastabend, committed by David S. Miller

net: sched: enable per cpu qstats

After previous patches to simplify qstats the qstats can be
made per cpu with a packed union in Qdisc struct.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 64015853
...@@ -41,7 +41,8 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d, ...@@ -41,7 +41,8 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d,
const struct gnet_stats_basic_packed *b, const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est64 *r); struct gnet_stats_rate_est64 *r);
int gnet_stats_copy_queue(struct gnet_dump *d, int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue *q, __u32 len); struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d); int gnet_stats_finish_copy(struct gnet_dump *d);
......
...@@ -90,7 +90,10 @@ struct Qdisc { ...@@ -90,7 +90,10 @@ struct Qdisc {
struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_basic_cpu __percpu *cpu_bstats;
} __packed; } __packed;
unsigned int __state; unsigned int __state;
struct gnet_stats_queue qstats; union {
struct gnet_stats_queue qstats;
struct gnet_stats_queue __percpu *cpu_qstats;
} __packed;
struct rcu_head rcu_head; struct rcu_head rcu_head;
int padded; int padded;
atomic_t refcnt; atomic_t refcnt;
...@@ -543,6 +546,13 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch) ...@@ -543,6 +546,13 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch)
sch->qstats.drops++; sch->qstats.drops++;
} }
/* Account one dropped packet in this CPU's per-cpu qstats slot. */
static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
{
	this_cpu_ptr(sch->cpu_qstats)->drops++;
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch) static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{ {
sch->qstats.overlimits++; sch->qstats.overlimits++;
......
...@@ -215,33 +215,74 @@ gnet_stats_copy_rate_est(struct gnet_dump *d, ...@@ -215,33 +215,74 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
} }
EXPORT_SYMBOL(gnet_stats_copy_rate_est); EXPORT_SYMBOL(gnet_stats_copy_rate_est);
/* Sum per-cpu queue counters into @qstats.
 *
 * qlen is zeroed rather than accumulated: there is no meaningful
 * per-cpu queue length, so the caller supplies the authoritative
 * qlen separately (see __gnet_stats_copy_queue()).
 */
static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	/* Loop-invariant: reset qlen once instead of on every cpu. */
	qstats->qlen = 0;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}
/* Fill @qstats either from per-cpu statistics (@cpu, when non-NULL)
 * or from the plain statistics block @q.
 *
 * In both cases the caller-supplied @qlen is authoritative and
 * overrides whatever qlen the source contained.
 */
static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
				    const struct gnet_stats_queue __percpu *cpu,
				    const struct gnet_stats_queue *q,
				    __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		/* qstats->qlen deliberately not copied from q: it is
		 * unconditionally set from @qlen below (the original
		 * q->qlen copy here was a dead store).
		 */
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	qstats->qlen = qlen;
}
/** /**
* gnet_stats_copy_queue - copy queue statistics into statistics TLV * gnet_stats_copy_queue - copy queue statistics into statistics TLV
* @d: dumping handle * @d: dumping handle
* @cpu_q: per cpu queue statistics
* @q: queue statistics * @q: queue statistics
* @qlen: queue length statistics * @qlen: queue length statistics
* *
* Appends the queue statistics to the top level TLV created by * Appends the queue statistics to the top level TLV created by
* gnet_stats_start_copy(). * gnet_stats_start_copy(). Using per cpu queue statistics if
* they are available.
* *
* Returns 0 on success or -1 with the statistic lock released * Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient. * if the room in the socket buffer was not sufficient.
*/ */
int int
gnet_stats_copy_queue(struct gnet_dump *d, gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen) struct gnet_stats_queue *q, __u32 qlen)
{ {
q->qlen = qlen; struct gnet_stats_queue qstats = {0};
__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
if (d->compat_tc_stats) { if (d->compat_tc_stats) {
d->tc_stats.drops = q->drops; d->tc_stats.drops = qstats.drops;
d->tc_stats.qlen = q->qlen; d->tc_stats.qlen = qstats.qlen;
d->tc_stats.backlog = q->backlog; d->tc_stats.backlog = qstats.backlog;
d->tc_stats.overlimits = q->overlimits; d->tc_stats.overlimits = qstats.overlimits;
} }
if (d->tail) if (d->tail)
return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q)); return gnet_stats_copy(d, TCA_STATS_QUEUE,
&qstats, sizeof(qstats));
return 0; return 0;
} }
......
...@@ -623,7 +623,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, ...@@ -623,7 +623,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 || if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats, gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 || &p->tcfc_rate_est) < 0 ||
gnet_stats_copy_queue(&d, gnet_stats_copy_queue(&d, NULL,
&p->tcfc_qstats, &p->tcfc_qstats,
p->tcfc_qstats.qlen) < 0) p->tcfc_qstats.qlen) < 0)
goto errout; goto errout;
......
...@@ -947,6 +947,10 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, ...@@ -947,6 +947,10 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
alloc_percpu(struct gnet_stats_basic_cpu); alloc_percpu(struct gnet_stats_basic_cpu);
if (!sch->cpu_bstats) if (!sch->cpu_bstats)
goto err_out4; goto err_out4;
sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!sch->cpu_qstats)
goto err_out4;
} }
if (tca[TCA_STAB]) { if (tca[TCA_STAB]) {
...@@ -995,6 +999,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, ...@@ -995,6 +999,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
err_out4: err_out4:
free_percpu(sch->cpu_bstats); free_percpu(sch->cpu_bstats);
free_percpu(sch->cpu_qstats);
/* /*
* Any broken qdiscs that would require a ops->reset() here? * Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary. * The qdisc was never in action so it shouldn't be necessary.
...@@ -1313,6 +1318,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, ...@@ -1313,6 +1318,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event) u32 portid, u32 seq, u16 flags, int event)
{ {
struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL; struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
struct gnet_stats_queue __percpu *cpu_qstats = NULL;
struct tcmsg *tcm; struct tcmsg *tcm;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
...@@ -1349,12 +1355,14 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, ...@@ -1349,12 +1355,14 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
goto nla_put_failure; goto nla_put_failure;
if (qdisc_is_percpu_stats(q)) if (qdisc_is_percpu_stats(q)) {
cpu_bstats = q->cpu_bstats; cpu_bstats = q->cpu_bstats;
cpu_qstats = q->cpu_qstats;
}
if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 || if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
gnet_stats_copy_queue(&d, &q->qstats, qlen) < 0) gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure; goto nla_put_failure;
if (gnet_stats_finish_copy(&d) < 0) if (gnet_stats_finish_copy(&d) < 0)
......
...@@ -638,7 +638,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -638,7 +638,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct atm_flow_data *flow = (struct atm_flow_data *)arg; struct atm_flow_data *flow = (struct atm_flow_data *)arg;
if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
gnet_stats_copy_queue(d, &flow->qstats, flow->q->q.qlen) < 0) gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
return -1; return -1;
return 0; return 0;
......
...@@ -1602,7 +1602,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -1602,7 +1602,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats, cl->q->q.qlen) < 0) gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
return -1; return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
......
...@@ -284,7 +284,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -284,7 +284,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qdisc->qstats, qlen) < 0) gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
return -1; return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
...@@ -550,7 +550,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -550,7 +550,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
qs.backlog = q->backlogs[idx]; qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped; qs.drops = flow->dropped;
} }
if (gnet_stats_copy_queue(d, &qs, 0) < 0) if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
return -1; return -1;
if (idx < q->flows_cnt) if (idx < q->flows_cnt)
return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
...@@ -1378,7 +1378,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -1378,7 +1378,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats, cl->qdisc->q.qlen) < 0) gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
return -1; return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
...@@ -1147,7 +1147,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) ...@@ -1147,7 +1147,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats, qlen) < 0) gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1; return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
......
...@@ -200,7 +200,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -200,7 +200,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping; sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, &sch->qstats, sch->q.qlen) < 0) gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
return -1; return -1;
return 0; return 0;
} }
......
...@@ -356,14 +356,15 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -356,14 +356,15 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
/* Reclaim root sleeping lock before completing stats */ /* Reclaim root sleeping lock before completing stats */
spin_lock_bh(d->lock); spin_lock_bh(d->lock);
if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
gnet_stats_copy_queue(d, &qstats, qlen) < 0) gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1; return -1;
} else { } else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping; sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, &sch->qstats, sch->q.qlen) < 0) gnet_stats_copy_queue(d, NULL,
&sch->qstats, sch->q.qlen) < 0)
return -1; return -1;
} }
return 0; return 0;
......
...@@ -361,7 +361,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -361,7 +361,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1]; cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, &cl_q->qstats, cl_q->q.qlen) < 0) gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1; return -1;
return 0; return 0;
......
...@@ -325,7 +325,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -325,7 +325,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1]; cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, &cl_q->qstats, cl_q->q.qlen) < 0) gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1; return -1;
return 0; return 0;
......
...@@ -670,7 +670,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, ...@@ -670,7 +670,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0) gnet_stats_copy_queue(d, NULL,
&cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
return -1; return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
...@@ -871,7 +871,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -871,7 +871,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
qs.qlen = slot->qlen; qs.qlen = slot->qlen;
qs.backlog = slot->backlog; qs.backlog = slot->backlog;
} }
if (gnet_stats_copy_queue(d, &qs, qs.qlen) < 0) if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1; return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment