Commit 695b4ec0 authored by Eric Dumazet, committed by David S. Miller

pkt_sched: fq: use proper locking in fq_dump_stats()

When fq is used on 32bit kernels, we need to lock the qdisc before
copying 64bit fields.

Otherwise "tc -s qdisc ..." might report bogus values.

Fixes: afe4fd06 ("pkt_sched: fq: Fair Queue packet scheduler")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent db74a333
@@ -823,20 +823,24 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_get_ns();
-	struct tc_fq_qd_stats st = {
-		.gc_flows		= q->stat_gc_flows,
-		.highprio_packets	= q->stat_internal_packets,
-		.tcp_retrans		= q->stat_tcp_retrans,
-		.throttled		= q->stat_throttled,
-		.flows_plimit		= q->stat_flows_plimit,
-		.pkts_too_long		= q->stat_pkts_too_long,
-		.allocation_errors	= q->stat_allocation_errors,
-		.flows			= q->flows,
-		.inactive_flows		= q->inactive_flows,
-		.throttled_flows	= q->throttled_flows,
-		.time_next_delayed_flow	= q->time_next_delayed_flow - now,
-	};
+	struct tc_fq_qd_stats st;
+
+	sch_tree_lock(sch);
+
+	st.gc_flows		  = q->stat_gc_flows;
+	st.highprio_packets	  = q->stat_internal_packets;
+	st.tcp_retrans		  = q->stat_tcp_retrans;
+	st.throttled		  = q->stat_throttled;
+	st.flows_plimit		  = q->stat_flows_plimit;
+	st.pkts_too_long	  = q->stat_pkts_too_long;
+	st.allocation_errors	  = q->stat_allocation_errors;
+	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+	st.flows		  = q->flows;
+	st.inactive_flows	  = q->inactive_flows;
+	st.throttled_flows	  = q->throttled_flows;
+	st.pad			  = 0;
+
+	sch_tree_unlock(sch);
 
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
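For context, the bug being fixed is a torn read: on a 32-bit kernel a plain
u64 load is typically performed as two separate 32-bit loads, so a reader
can combine the low half of one value with the high half of another while
the qdisc is updating its counters. A minimal userspace sketch of the same
effect (a hypothetical demo, not from this commit; compile for a 32-bit
target, e.g. gcc -m32 -O2 -pthread torn.c):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Shared 64-bit value, deliberately racy and not atomic: a 32-bit
 * target accesses it with two separate 32-bit loads and stores. */
static volatile uint64_t shared;

static void *writer(void *arg)
{
	/* Flip between all-zeros and all-ones, so a torn read shows
	 * up as a value that mixes halves of the two. */
	for (;;) {
		shared = 0;
		shared = ~0ULL;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);
	for (;;) {
		uint64_t v = shared;	/* two 32-bit loads on 32-bit */

		if (v != 0 && v != ~0ULL) {
			printf("torn read: 0x%016llx\n",
			       (unsigned long long)v);
			exit(0);
		}
	}
}

Taking sch_tree_lock() in fq_dump_stats() rules this out by serializing the
dump against the paths that update the statistics, which run under the same
qdisc root lock.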