Commit 00ac0dc3 authored by David S. Miller

Merge branch 'net_sched-dump-no-rtnl'

Eric Dumazet says:

====================
net_sched: first series for RTNL-less qdisc dumps

Medium term goal is to implement "tc qdisc show" without needing
to acquire RTNL.

This first series makes the requested changes in 14 qdiscs.

Notes :

 - RTNL is still held in "tc qdisc show", more changes are needed.

 - Qdiscs returning many attributes might want/need to provide
   a consistent set of attributes. If that is the case, their
   dump() method could acquire the qdisc spinlock, to pair with the
   spinlock acquisition in their change() method (a minimal sketch of
   this pairing follows the commit header below).

V2: Addressed Simon's feedback (thanks a lot, Simon)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fdf41237 c85cedb3
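
Every patch in the series follows the same recipe: change() still runs under RTNL (and takes the qdisc tree lock where it already did), but it now publishes each parameter with WRITE_ONCE(), so that a future lockless dump() can read it with READ_ONCE(). The sketch below illustrates that pairing. It is a minimal illustration only; the demo_* names and the TCA_DEMO_LIMIT attribute are hypothetical placeholders, not code from these patches.

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

#define TCA_DEMO_LIMIT 1	/* hypothetical netlink attribute id */

/* Hypothetical qdisc private data; one tunable is enough for the sketch. */
struct demo_sched_data {
	u32 limit;	/* written by demo_change(), read locklessly by demo_dump() */
};

static int demo_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct demo_sched_data *q = qdisc_priv(sch);
	u32 limit = nla_get_u32(opt);	/* attribute parsing/validation elided */

	sch_tree_lock(sch);
	/* Paired with READ_ONCE() in demo_dump(): the value may be observed
	 * by a dump that no longer holds RTNL.
	 */
	WRITE_ONCE(q->limit, limit);
	sch_tree_unlock(sch);
	return 0;
}

static int demo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct demo_sched_data *q = qdisc_priv(sch);

	/* Independent single-value reads only need READ_ONCE().  A qdisc that
	 * must report a consistent *set* of attributes could instead take the
	 * qdisc spinlock here, pairing with the lock taken in its change().
	 */
	if (nla_put_u32(skb, TCA_DEMO_LIMIT, READ_ONCE(q->limit)))
		return -1;
	return skb->len;
}

The hunks below apply exactly this field-by-field conversion to each qdisc.
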
@@ -233,10 +233,10 @@ static inline void red_set_parms(struct red_parms *p,
 	int delta = qth_max - qth_min;
 	u32 max_p_delta;
 
-	p->qth_min = qth_min << Wlog;
-	p->qth_max = qth_max << Wlog;
-	p->Wlog = Wlog;
-	p->Plog = Plog;
+	WRITE_ONCE(p->qth_min, qth_min << Wlog);
+	WRITE_ONCE(p->qth_max, qth_max << Wlog);
+	WRITE_ONCE(p->Wlog, Wlog);
+	WRITE_ONCE(p->Plog, Plog);
 	if (delta <= 0)
 		delta = 1;
 	p->qth_delta = delta;
@@ -244,7 +244,7 @@ static inline void red_set_parms(struct red_parms *p,
 		max_P = red_maxp(Plog);
 		max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
 	}
-	p->max_P = max_P;
+	WRITE_ONCE(p->max_P, max_P);
 	max_p_delta = max_P / delta;
 	max_p_delta = max(max_p_delta, 1U);
 	p->max_P_reciprocal = reciprocal_value(max_p_delta);
@@ -257,7 +257,7 @@ static inline void red_set_parms(struct red_parms *p,
 	p->target_min = qth_min + 2*delta;
 	p->target_max = qth_min + 3*delta;
 
-	p->Scell_log = Scell_log;
+	WRITE_ONCE(p->Scell_log, Scell_log);
 	p->Scell_max = (255 << Scell_log);
 
 	if (stab)
...
@@ -2572,6 +2572,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CAKE_MAX + 1];
+	u16 rate_flags;
+	u8 flow_mode;
 	int err;
 
 	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
@@ -2579,10 +2581,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	if (err < 0)
 		return err;
 
+	flow_mode = q->flow_mode;
 	if (tb[TCA_CAKE_NAT]) {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-		q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
-		q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+		flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+		flow_mode |= CAKE_FLOW_NAT_FLAG *
 			!!nla_get_u32(tb[TCA_CAKE_NAT]);
 #else
 		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
@@ -2592,29 +2595,34 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_CAKE_BASE_RATE64])
-		q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
+		WRITE_ONCE(q->rate_bps,
+			   nla_get_u64(tb[TCA_CAKE_BASE_RATE64]));
 
 	if (tb[TCA_CAKE_DIFFSERV_MODE])
-		q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
+		WRITE_ONCE(q->tin_mode,
+			   nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]));
 
+	rate_flags = q->rate_flags;
 	if (tb[TCA_CAKE_WASH]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
-			q->rate_flags |= CAKE_FLAG_WASH;
+			rate_flags |= CAKE_FLAG_WASH;
 		else
-			q->rate_flags &= ~CAKE_FLAG_WASH;
+			rate_flags &= ~CAKE_FLAG_WASH;
 	}
 
 	if (tb[TCA_CAKE_FLOW_MODE])
-		q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
+		flow_mode = ((flow_mode & CAKE_FLOW_NAT_FLAG) |
 				(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
 					CAKE_FLOW_MASK));
 
 	if (tb[TCA_CAKE_ATM])
-		q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
+		WRITE_ONCE(q->atm_mode,
+			   nla_get_u32(tb[TCA_CAKE_ATM]));
 
 	if (tb[TCA_CAKE_OVERHEAD]) {
-		q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
-		q->rate_flags |= CAKE_FLAG_OVERHEAD;
+		WRITE_ONCE(q->rate_overhead,
+			   nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
+		rate_flags |= CAKE_FLAG_OVERHEAD;
 
 		q->max_netlen = 0;
 		q->max_adjlen = 0;
@@ -2623,7 +2631,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_CAKE_RAW]) {
-		q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
+		rate_flags &= ~CAKE_FLAG_OVERHEAD;
 
 		q->max_netlen = 0;
 		q->max_adjlen = 0;
@@ -2632,54 +2640,58 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_CAKE_MPU])
-		q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
+		WRITE_ONCE(q->rate_mpu,
+			   nla_get_u32(tb[TCA_CAKE_MPU]));
 
 	if (tb[TCA_CAKE_RTT]) {
-		q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
+		u32 interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
-		if (!q->interval)
-			q->interval = 1;
+		WRITE_ONCE(q->interval, max(interval, 1U));
 	}
 
 	if (tb[TCA_CAKE_TARGET]) {
-		q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
+		u32 target = nla_get_u32(tb[TCA_CAKE_TARGET]);
 
-		if (!q->target)
-			q->target = 1;
+		WRITE_ONCE(q->target, max(target, 1U));
 	}
 
 	if (tb[TCA_CAKE_AUTORATE]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
-			q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+			rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
 		else
-			q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+			rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
 	}
 
 	if (tb[TCA_CAKE_INGRESS]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
-			q->rate_flags |= CAKE_FLAG_INGRESS;
+			rate_flags |= CAKE_FLAG_INGRESS;
 		else
-			q->rate_flags &= ~CAKE_FLAG_INGRESS;
+			rate_flags &= ~CAKE_FLAG_INGRESS;
 	}
 
 	if (tb[TCA_CAKE_ACK_FILTER])
-		q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+		WRITE_ONCE(q->ack_filter,
+			   nla_get_u32(tb[TCA_CAKE_ACK_FILTER]));
 
 	if (tb[TCA_CAKE_MEMORY])
-		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
+		WRITE_ONCE(q->buffer_config_limit,
+			   nla_get_u32(tb[TCA_CAKE_MEMORY]));
 
 	if (tb[TCA_CAKE_SPLIT_GSO]) {
 		if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
-			q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+			rate_flags |= CAKE_FLAG_SPLIT_GSO;
 		else
-			q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+			rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
 	}
 
 	if (tb[TCA_CAKE_FWMARK]) {
-		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
-		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
+		WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK]));
+		WRITE_ONCE(q->fwmark_shft,
+			   q->fwmark_mask ? __ffs(q->fwmark_mask) : 0);
 	}
 
+	WRITE_ONCE(q->rate_flags, rate_flags);
+	WRITE_ONCE(q->flow_mode, flow_mode);
 	if (q->tins) {
 		sch_tree_lock(sch);
 		cake_reconfigure(sch);
@@ -2774,68 +2786,72 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct nlattr *opts;
+	u16 rate_flags;
+	u8 flow_mode;
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (!opts)
 		goto nla_put_failure;
 
-	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
-			      TCA_CAKE_PAD))
+	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64,
+			      READ_ONCE(q->rate_bps), TCA_CAKE_PAD))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
-			q->flow_mode & CAKE_FLOW_MASK))
+	flow_mode = READ_ONCE(q->flow_mode);
+	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE, flow_mode & CAKE_FLOW_MASK))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
+	if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
+	if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
+	if (nla_put_u32(skb, TCA_CAKE_MEMORY,
+			READ_ONCE(q->buffer_config_limit)))
 		goto nla_put_failure;
 
+	rate_flags = READ_ONCE(q->rate_flags);
 	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
-			!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+			!!(rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
-			!!(q->rate_flags & CAKE_FLAG_INGRESS)))
+			!!(rate_flags & CAKE_FLAG_INGRESS)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
+	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_NAT,
-			!!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+			!!(flow_mode & CAKE_FLOW_NAT_FLAG)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
+	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_WASH,
-			!!(q->rate_flags & CAKE_FLAG_WASH)))
+			!!(rate_flags & CAKE_FLAG_WASH)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
+	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead)))
 		goto nla_put_failure;
 
-	if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
+	if (!(rate_flags & CAKE_FLAG_OVERHEAD))
 		if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
 			goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
+	if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
+	if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
-			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+			!!(rate_flags & CAKE_FLAG_SPLIT_GSO)))
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
+	if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
...
@@ -389,11 +389,11 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	/* Everything went OK, save the parameters used. */
-	q->hicredit = qopt->hicredit;
-	q->locredit = qopt->locredit;
-	q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
-	q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
-	q->offload = qopt->offload;
+	WRITE_ONCE(q->hicredit, qopt->hicredit);
+	WRITE_ONCE(q->locredit, qopt->locredit);
+	WRITE_ONCE(q->idleslope, qopt->idleslope * BYTES_PER_KBIT);
+	WRITE_ONCE(q->sendslope, qopt->sendslope * BYTES_PER_KBIT);
+	WRITE_ONCE(q->offload, qopt->offload);
 
 	return 0;
 }
@@ -459,11 +459,11 @@ static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (!nest)
 		goto nla_put_failure;
 
-	opt.hicredit = q->hicredit;
-	opt.locredit = q->locredit;
-	opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
-	opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
-	opt.offload = q->offload;
+	opt.hicredit = READ_ONCE(q->hicredit);
+	opt.locredit = READ_ONCE(q->locredit);
+	opt.sendslope = div64_s64(READ_ONCE(q->sendslope), BYTES_PER_KBIT);
+	opt.idleslope = div64_s64(READ_ONCE(q->idleslope), BYTES_PER_KBIT);
+	opt.offload = READ_ONCE(q->offload);
 
 	if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
...
@@ -405,8 +405,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 	} else
 		sch_tree_lock(sch);
 
-	q->flags = ctl->flags;
-	q->limit = ctl->limit;
+	WRITE_ONCE(q->flags, ctl->flags);
+	WRITE_ONCE(q->limit, ctl->limit);
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 		      ctl->Plog, ctl->Scell_log,
@@ -431,15 +431,16 @@ static int choke_init(struct Qdisc *sch, struct nlattr *opt,
 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
+	u8 Wlog = READ_ONCE(q->parms.Wlog);
 	struct nlattr *opts = NULL;
 	struct tc_red_qopt opt = {
-		.limit = q->limit,
-		.flags = q->flags,
-		.qth_min = q->parms.qth_min >> q->parms.Wlog,
-		.qth_max = q->parms.qth_max >> q->parms.Wlog,
-		.Wlog = q->parms.Wlog,
-		.Plog = q->parms.Plog,
-		.Scell_log = q->parms.Scell_log,
+		.limit = READ_ONCE(q->limit),
+		.flags = READ_ONCE(q->flags),
+		.qth_min = READ_ONCE(q->parms.qth_min) >> Wlog,
+		.qth_max = READ_ONCE(q->parms.qth_max) >> Wlog,
+		.Wlog = Wlog,
+		.Plog = READ_ONCE(q->parms.Plog),
+		.Scell_log = READ_ONCE(q->parms.Scell_log),
 	};
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -447,7 +448,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_put_failure;
 
 	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
-	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+	    nla_put_u32(skb, TCA_CHOKE_MAX_P, READ_ONCE(q->parms.max_P)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
...
@@ -118,26 +118,31 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_CODEL_TARGET]) {
 		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
 
-		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->params.target,
+			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_CODEL_CE_THRESHOLD]) {
 		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);
 
-		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->params.ce_threshold,
+			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_CODEL_INTERVAL]) {
 		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
 
-		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->params.interval,
+			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_CODEL_LIMIT])
-		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+		WRITE_ONCE(sch->limit,
+			   nla_get_u32(tb[TCA_CODEL_LIMIT]));
 
 	if (tb[TCA_CODEL_ECN])
-		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+		WRITE_ONCE(q->params.ecn,
+			   !!nla_get_u32(tb[TCA_CODEL_ECN]));
 
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > sch->limit) {
@@ -183,6 +188,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt,
 static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
+	codel_time_t ce_threshold;
 	struct nlattr *opts;
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -190,17 +196,18 @@ static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_CODEL_TARGET,
-			codel_time_to_us(q->params.target)) ||
+			codel_time_to_us(READ_ONCE(q->params.target))) ||
 	    nla_put_u32(skb, TCA_CODEL_LIMIT,
-			sch->limit) ||
+			READ_ONCE(sch->limit)) ||
 	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
-			codel_time_to_us(q->params.interval)) ||
+			codel_time_to_us(READ_ONCE(q->params.interval))) ||
 	    nla_put_u32(skb, TCA_CODEL_ECN,
-			q->params.ecn))
+			READ_ONCE(q->params.ecn)))
 		goto nla_put_failure;
 
-	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+	ce_threshold = READ_ONCE(q->params.ce_threshold);
+	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
 	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
-			codel_time_to_us(q->params.ce_threshold)))
+			codel_time_to_us(ce_threshold)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
...
@@ -467,15 +467,15 @@ static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (!nest)
 		goto nla_put_failure;
 
-	opt.delta = q->delta;
-	opt.clockid = q->clockid;
-	if (q->offload)
+	opt.delta = READ_ONCE(q->delta);
+	opt.clockid = READ_ONCE(q->clockid);
+	if (READ_ONCE(q->offload))
 		opt.flags |= TC_ETF_OFFLOAD_ON;
 
-	if (q->deadline_mode)
+	if (READ_ONCE(q->deadline_mode))
 		opt.flags |= TC_ETF_DEADLINE_MODE_ON;
 
-	if (q->skip_sock_check)
+	if (READ_ONCE(q->skip_sock_check))
 		opt.flags |= TC_ETF_SKIP_SOCK_CHECK;
 
 	if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
...
@@ -646,7 +646,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 
 	sch_tree_lock(sch);
 
-	q->nbands = nbands;
+	WRITE_ONCE(q->nbands, nbands);
 	for (i = nstrict; i < q->nstrict; i++) {
 		if (q->classes[i].qdisc->q.qlen) {
 			list_add_tail(&q->classes[i].alist, &q->active);
@@ -658,11 +658,11 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 			list_del(&q->classes[i].alist);
 		qdisc_tree_flush_backlog(q->classes[i].qdisc);
 	}
-	q->nstrict = nstrict;
+	WRITE_ONCE(q->nstrict, nstrict);
 	memcpy(q->prio2band, priomap, sizeof(priomap));
 
 	for (i = 0; i < q->nbands; i++)
-		q->classes[i].quantum = quanta[i];
+		WRITE_ONCE(q->classes[i].quantum, quanta[i]);
 
 	for (i = oldbands; i < q->nbands; i++) {
 		q->classes[i].qdisc = queues[i];
@@ -676,7 +676,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 	for (i = q->nbands; i < oldbands; i++) {
 		qdisc_put(q->classes[i].qdisc);
 		q->classes[i].qdisc = NULL;
-		q->classes[i].quantum = 0;
+		WRITE_ONCE(q->classes[i].quantum, 0);
 		q->classes[i].deficit = 0;
 		gnet_stats_basic_sync_init(&q->classes[i].bstats);
 		memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
@@ -733,6 +733,7 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct ets_sched *q = qdisc_priv(sch);
 	struct nlattr *opts;
 	struct nlattr *nest;
+	u8 nbands, nstrict;
 	int band;
 	int prio;
 	int err;
@@ -745,21 +746,22 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (!opts)
 		goto nla_err;
 
-	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
+	nbands = READ_ONCE(q->nbands);
+	if (nla_put_u8(skb, TCA_ETS_NBANDS, nbands))
 		goto nla_err;
 
-	if (q->nstrict &&
-	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
+	nstrict = READ_ONCE(q->nstrict);
+	if (nstrict && nla_put_u8(skb, TCA_ETS_NSTRICT, nstrict))
 		goto nla_err;
 
-	if (q->nbands > q->nstrict) {
+	if (nbands > nstrict) {
 		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
 		if (!nest)
 			goto nla_err;
 
-		for (band = q->nstrict; band < q->nbands; band++) {
+		for (band = nstrict; band < nbands; band++) {
 			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
-					q->classes[band].quantum))
+					READ_ONCE(q->classes[band].quantum)))
 				goto nla_err;
 		}
 
@@ -771,7 +773,8 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_err;
 
 	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
-		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
+		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND,
+			       READ_ONCE(q->prio2band[prio])))
			goto nla_err;
 	}
...
@@ -19,7 +19,8 @@
 static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
 {
-	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
+	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
+		   READ_ONCE(sch->limit)))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_drop(skb, sch, to_free);
@@ -28,7 +29,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
 {
-	if (likely(sch->q.qlen < sch->limit))
+	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_drop(skb, sch, to_free);
@@ -39,7 +40,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {
 	unsigned int prev_backlog;
 
-	if (likely(sch->q.qlen < sch->limit))
+	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 		return qdisc_enqueue_tail(skb, sch);
 
 	prev_backlog = sch->qstats.backlog;
@@ -105,14 +106,14 @@ static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
 		if (is_bfifo)
 			limit *= psched_mtu(qdisc_dev(sch));
 
-		sch->limit = limit;
+		WRITE_ONCE(sch->limit, limit);
 	} else {
 		struct tc_fifo_qopt *ctl = nla_data(opt);
 
 		if (nla_len(opt) < sizeof(*ctl))
 			return -EINVAL;
 
-		sch->limit = ctl->limit;
+		WRITE_ONCE(sch->limit, ctl->limit);
 	}
 
 	if (is_bfifo)
@@ -154,7 +155,7 @@ static void fifo_destroy(struct Qdisc *sch)
 
 static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-	struct tc_fifo_qopt opt = { .limit = sch->limit };
+	struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };
 
 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 		goto nla_put_failure;
...
@@ -106,6 +106,8 @@ struct fq_perband_flows {
 	int quantum; /* based on band nr : 576KB, 192KB, 64KB */
 };
 
+#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)
+
 struct fq_sched_data {
 /* Read mostly cache line */
 
@@ -122,7 +124,7 @@ struct fq_sched_data {
 	u8 rate_enable;
 	u8 fq_trees_log;
 	u8 horizon_drop;
-	u8 prio2band[(TC_PRIO_MAX + 1) >> 2];
+	u8 prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
 	u32 timer_slack; /* hrtimer slack in ns */
 
 /* Read/Write fields. */
@@ -159,7 +161,7 @@ struct fq_sched_data {
 /* return the i-th 2-bit value ("crumb") */
 static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
 {
-	return (prio2band[prio / 4] >> (2 * (prio & 0x3))) & 0x3;
+	return (READ_ONCE(prio2band[prio / 4]) >> (2 * (prio & 0x3))) & 0x3;
 }
 
 /*
@@ -888,7 +890,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
 		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
 
 	q->fq_root = array;
-	q->fq_trees_log = log;
+	WRITE_ONCE(q->fq_trees_log, log);
 
 	sch_tree_unlock(sch);
 
@@ -927,11 +929,15 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
 static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
 {
 	const int num_elems = TC_PRIO_MAX + 1;
+	u8 tmp[FQ_PRIO2BAND_CRUMB_SIZE];
 	int i;
 
-	memset(out, 0, num_elems / 4);
+	memset(tmp, 0, sizeof(tmp));
 	for (i = 0; i < num_elems; i++)
-		out[i / 4] |= in[i] << (2 * (i & 0x3));
+		tmp[i / 4] |= in[i] << (2 * (i & 0x3));
+
+	for (i = 0; i < FQ_PRIO2BAND_CRUMB_SIZE; i++)
+		WRITE_ONCE(out[i], tmp[i]);
 }
 
 static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
@@ -958,7 +964,7 @@ static int fq_load_weights(struct fq_sched_data *q,
 		}
 	}
 	for (i = 0; i < FQ_BANDS; i++)
-		q->band_flows[i].quantum = weights[i];
+		WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
 	return 0;
 }
 
@@ -1011,16 +1017,18 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
			err = -EINVAL;
 	}
 	if (tb[TCA_FQ_PLIMIT])
-		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
+		WRITE_ONCE(sch->limit,
+			   nla_get_u32(tb[TCA_FQ_PLIMIT]));
 
 	if (tb[TCA_FQ_FLOW_PLIMIT])
-		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
+		WRITE_ONCE(q->flow_plimit,
+			   nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));
 
 	if (tb[TCA_FQ_QUANTUM]) {
 		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
 		if (quantum > 0 && quantum <= (1 << 20)) {
-			q->quantum = quantum;
+			WRITE_ONCE(q->quantum, quantum);
 		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			err = -EINVAL;
@@ -1028,7 +1036,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (tb[TCA_FQ_INITIAL_QUANTUM])
-		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+		WRITE_ONCE(q->initial_quantum,
+			   nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]));
 
 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
@@ -1037,17 +1046,19 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
 		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
 
-		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
+		WRITE_ONCE(q->flow_max_rate,
+			   (rate == ~0U) ? ~0UL : rate);
 	}
 	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
-		q->low_rate_threshold =
-			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
+		WRITE_ONCE(q->low_rate_threshold,
+			   nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]));
 
 	if (tb[TCA_FQ_RATE_ENABLE]) {
 		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
 
 		if (enable <= 1)
-			q->rate_enable = enable;
+			WRITE_ONCE(q->rate_enable,
+				   enable);
 		else
			err = -EINVAL;
 	}
@@ -1055,7 +1066,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
 		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;
 
-		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
+		WRITE_ONCE(q->flow_refill_delay,
+			   usecs_to_jiffies(usecs_delay));
 	}
 
 	if (!err && tb[TCA_FQ_PRIOMAP])
@@ -1065,21 +1077,26 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
 
 	if (tb[TCA_FQ_ORPHAN_MASK])
-		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+		WRITE_ONCE(q->orphan_mask,
+			   nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]));
 
 	if (tb[TCA_FQ_CE_THRESHOLD])
-		q->ce_threshold = (u64)NSEC_PER_USEC *
-				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+		WRITE_ONCE(q->ce_threshold,
+			   (u64)NSEC_PER_USEC *
+			   nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]));
 
 	if (tb[TCA_FQ_TIMER_SLACK])
-		q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]);
+		WRITE_ONCE(q->timer_slack,
+			   nla_get_u32(tb[TCA_FQ_TIMER_SLACK]));
 
 	if (tb[TCA_FQ_HORIZON])
-		q->horizon = (u64)NSEC_PER_USEC *
-				  nla_get_u32(tb[TCA_FQ_HORIZON]);
+		WRITE_ONCE(q->horizon,
+			   (u64)NSEC_PER_USEC *
+			   nla_get_u32(tb[TCA_FQ_HORIZON]));
 
 	if (tb[TCA_FQ_HORIZON_DROP])
-		q->horizon_drop = nla_get_u8(tb[TCA_FQ_HORIZON_DROP]);
+		WRITE_ONCE(q->horizon_drop,
+			   nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));
 
 	if (!err) {
 
@@ -1160,13 +1177,13 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 ce_threshold = q->ce_threshold;
 	struct tc_prio_qopt prio = {
		.bands = FQ_BANDS,
 	};
-	u64 horizon = q->horizon;
 	struct nlattr *opts;
+	u64 ce_threshold;
 	s32 weights[3];
+	u64 horizon;
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (opts == NULL)
@@ -1174,35 +1191,48 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
 
+	ce_threshold = READ_ONCE(q->ce_threshold);
 	do_div(ce_threshold, NSEC_PER_USEC);
+
+	horizon = READ_ONCE(q->horizon);
 	do_div(horizon, NSEC_PER_USEC);
 
-	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
-	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
-	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
-	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
-	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
+	if (nla_put_u32(skb, TCA_FQ_PLIMIT,
+			READ_ONCE(sch->limit)) ||
+	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
+			READ_ONCE(q->flow_plimit)) ||
+	    nla_put_u32(skb, TCA_FQ_QUANTUM,
+			READ_ONCE(q->quantum)) ||
+	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM,
+			READ_ONCE(q->initial_quantum)) ||
+	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE,
+			READ_ONCE(q->rate_enable)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
-			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
+			min_t(unsigned long,
+			      READ_ONCE(q->flow_max_rate), ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
-			jiffies_to_usecs(q->flow_refill_delay)) ||
-	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
+			jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
+	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK,
+			READ_ONCE(q->orphan_mask)) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
-			q->low_rate_threshold) ||
+			READ_ONCE(q->low_rate_threshold)) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
-	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) ||
-	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack) ||
+	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG,
+			READ_ONCE(q->fq_trees_log)) ||
+	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
+			READ_ONCE(q->timer_slack)) ||
	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
-	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop))
+	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
+		       READ_ONCE(q->horizon_drop)))
 		goto nla_put_failure;
 
 	fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
 	if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
 		goto nla_put_failure;
 
-	weights[0] = q->band_flows[0].quantum;
-	weights[1] = q->band_flows[1].quantum;
-	weights[2] = q->band_flows[2].quantum;
+	weights[0] = READ_ONCE(q->band_flows[0].quantum);
+	weights[1] = READ_ONCE(q->band_flows[1].quantum);
+	weights[2] = READ_ONCE(q->band_flows[2].quantum);
 	if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
 		goto nla_put_failure;
...
@@ -396,40 +396,49 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_FQ_CODEL_TARGET]) {
 		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
 
-		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->cparams.target,
+			   (target * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
 		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
 
-		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->cparams.ce_threshold,
+			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
-		q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
+		WRITE_ONCE(q->cparams.ce_threshold_selector,
+			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]));
 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
-		q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);
+		WRITE_ONCE(q->cparams.ce_threshold_mask,
+			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]));
 
 	if (tb[TCA_FQ_CODEL_INTERVAL]) {
 		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
 
-		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->cparams.interval,
+			   (interval * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_FQ_CODEL_LIMIT])
-		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+		WRITE_ONCE(sch->limit,
+			   nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]));
 
 	if (tb[TCA_FQ_CODEL_ECN])
-		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+		WRITE_ONCE(q->cparams.ecn,
+			   !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]));
 
 	if (quantum)
-		q->quantum = quantum;
+		WRITE_ONCE(q->quantum, quantum);
 
 	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
-		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+		WRITE_ONCE(q->drop_batch_size,
+			   max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])));
 
 	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
-		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+		WRITE_ONCE(q->memory_limit,
+			   min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])));
 
 	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
@@ -522,6 +531,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	codel_time_t ce_threshold;
 	struct nlattr *opts;
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -529,30 +539,33 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
-			codel_time_to_us(q->cparams.target)) ||
+			codel_time_to_us(READ_ONCE(q->cparams.target))) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
-			sch->limit) ||
+			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
-			codel_time_to_us(q->cparams.interval)) ||
+			codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
-			q->cparams.ecn) ||
+			READ_ONCE(q->cparams.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
-			q->quantum) ||
+			READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
-			q->drop_batch_size) ||
+			READ_ONCE(q->drop_batch_size)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
-			q->memory_limit) ||
+			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
-			q->flows_cnt))
+			READ_ONCE(q->flows_cnt)))
 		goto nla_put_failure;
 
-	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
+	ce_threshold = READ_ONCE(q->cparams.ce_threshold);
+	if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
 		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
-				codel_time_to_us(q->cparams.ce_threshold)))
+				codel_time_to_us(ce_threshold)))
			goto nla_put_failure;
-		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
+		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+			       READ_ONCE(q->cparams.ce_threshold_selector)))
			goto nla_put_failure;
-		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
+		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
+			       READ_ONCE(q->cparams.ce_threshold_mask)))
			goto nla_put_failure;
 	}
...
@@ -299,8 +299,8 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_FQ_PIE_LIMIT]) {
 		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);
 
-		q->p_params.limit = limit;
-		sch->limit = limit;
+		WRITE_ONCE(q->p_params.limit, limit);
+		WRITE_ONCE(sch->limit, limit);
 	}
 	if (tb[TCA_FQ_PIE_FLOWS]) {
 		if (q->flows) {
@@ -322,39 +322,45 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);
 
 		/* convert to pschedtime */
-		q->p_params.target =
-			PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
+		WRITE_ONCE(q->p_params.target,
+			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
 	}
 
 	/* tupdate is in jiffies */
 	if (tb[TCA_FQ_PIE_TUPDATE])
-		q->p_params.tupdate =
-			usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));
+		WRITE_ONCE(q->p_params.tupdate,
+			   usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));
 
 	if (tb[TCA_FQ_PIE_ALPHA])
-		q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);
+		WRITE_ONCE(q->p_params.alpha,
+			   nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));
 
 	if (tb[TCA_FQ_PIE_BETA])
-		q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);
+		WRITE_ONCE(q->p_params.beta,
+			   nla_get_u32(tb[TCA_FQ_PIE_BETA]));
 
 	if (tb[TCA_FQ_PIE_QUANTUM])
-		q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);
+		WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));
 
 	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
-		q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);
+		WRITE_ONCE(q->memory_limit,
+			   nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));
 
 	if (tb[TCA_FQ_PIE_ECN_PROB])
-		q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);
+		WRITE_ONCE(q->ecn_prob,
+			   nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));
 
 	if (tb[TCA_FQ_PIE_ECN])
-		q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);
+		WRITE_ONCE(q->p_params.ecn,
+			   nla_get_u32(tb[TCA_FQ_PIE_ECN]));
 
 	if (tb[TCA_FQ_PIE_BYTEMODE])
-		q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);
+		WRITE_ONCE(q->p_params.bytemode,
+			   nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));
 
 	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
-		q->p_params.dq_rate_estimator =
-			nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);
+		WRITE_ONCE(q->p_params.dq_rate_estimator,
+			   nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));
 
 	/* Drop excess packets if new limit is lower */
 	while (sch->q.qlen > sch->limit) {
@@ -471,22 +477,23 @@ static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
 		return -EMSGSIZE;
 
 	/* convert target from pschedtime to us */
-	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
+	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, READ_ONCE(sch->limit)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
-			((u32)PSCHED_TICKS2NS(q->p_params.target)) /
+			((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
-			jiffies_to_usecs(q->p_params.tupdate)) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
-	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
+			jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT,
+			READ_ONCE(q->memory_limit)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
+	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, READ_ONCE(q->p_params.bytemode)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
-			q->p_params.dq_rate_estimator))
+			READ_ONCE(q->p_params.dq_rate_estimator)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
...
@@ -1174,7 +1174,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	}
 
 	/* classification failed, try default class */
-	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
+	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle),
+				       READ_ONCE(q->defcls)), sch);
 	if (cl == NULL || cl->level > 0)
 		return NULL;
 
@@ -1443,9 +1444,7 @@ hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
 		return -EINVAL;
 	qopt = nla_data(opt);
 
-	sch_tree_lock(sch);
-	q->defcls = qopt->defcls;
-	sch_tree_unlock(sch);
+	WRITE_ONCE(q->defcls, qopt->defcls);
 
 	return 0;
 }
@@ -1525,7 +1524,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
 
-	qopt.defcls = q->defcls;
+	qopt.defcls = READ_ONCE(q->defcls);
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
 		goto nla_put_failure;
 	return skb->len;
...
@@ -534,27 +534,31 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
 	sch_tree_lock(sch);
 
 	if (tb[TCA_HHF_BACKLOG_LIMIT])
-		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
+		WRITE_ONCE(sch->limit, nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]));
 
-	q->quantum = new_quantum;
-	q->hhf_non_hh_weight = new_hhf_non_hh_weight;
+	WRITE_ONCE(q->quantum, new_quantum);
+	WRITE_ONCE(q->hhf_non_hh_weight, new_hhf_non_hh_weight);
 
 	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
-		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);
+		WRITE_ONCE(q->hh_flows_limit,
+			   nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]));
 
 	if (tb[TCA_HHF_RESET_TIMEOUT]) {
 		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);
 
-		q->hhf_reset_timeout = usecs_to_jiffies(us);
+		WRITE_ONCE(q->hhf_reset_timeout,
+			   usecs_to_jiffies(us));
 	}
 
 	if (tb[TCA_HHF_ADMIT_BYTES])
-		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);
+		WRITE_ONCE(q->hhf_admit_bytes,
+			   nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]));
 
 	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
 		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);
 
-		q->hhf_evict_timeout = usecs_to_jiffies(us);
+		WRITE_ONCE(q->hhf_evict_timeout,
+			   usecs_to_jiffies(us));
 	}
 
 	qlen = sch->q.qlen;
@@ -657,15 +661,18 @@ static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (opts == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
-	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
-	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
+	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, READ_ONCE(sch->limit)) ||
+	    nla_put_u32(skb, TCA_HHF_QUANTUM, READ_ONCE(q->quantum)) ||
+	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT,
+			READ_ONCE(q->hh_flows_limit)) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
-			jiffies_to_usecs(q->hhf_reset_timeout)) ||
-	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
+			jiffies_to_usecs(READ_ONCE(q->hhf_reset_timeout))) ||
+	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES,
+			READ_ONCE(q->hhf_admit_bytes)) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
-			jiffies_to_usecs(q->hhf_evict_timeout)) ||
-	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
+			jiffies_to_usecs(READ_ONCE(q->hhf_evict_timeout))) ||
+	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT,
+			READ_ONCE(q->hhf_non_hh_weight)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
...
@@ -156,36 +156,38 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
 		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);
 
 		/* convert to pschedtime */
-		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
+		WRITE_ONCE(q->params.target,
+			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
 	}
 
 	/* tupdate is in jiffies */
 	if (tb[TCA_PIE_TUPDATE])
-		q->params.tupdate =
-			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
+		WRITE_ONCE(q->params.tupdate,
+			   usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));
 
 	if (tb[TCA_PIE_LIMIT]) {
 		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
 
-		q->params.limit = limit;
-		sch->limit = limit;
+		WRITE_ONCE(q->params.limit, limit);
+		WRITE_ONCE(sch->limit, limit);
 	}
 
 	if (tb[TCA_PIE_ALPHA])
-		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);
+		WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));
 
 	if (tb[TCA_PIE_BETA])
-		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);
+		WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));
 
 	if (tb[TCA_PIE_ECN])
-		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);
+		WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));
 
 	if (tb[TCA_PIE_BYTEMODE])
-		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
+		WRITE_ONCE(q->params.bytemode,
+			   nla_get_u32(tb[TCA_PIE_BYTEMODE]));
 
 	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
-		q->params.dq_rate_estimator =
-			nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);
+		WRITE_ONCE(q->params.dq_rate_estimator,
+			   nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));
 
 	/* Drop excess packets if new limit is lower */
 	qlen = sch->q.qlen;
@@ -469,17 +471,18 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	/* convert target from pschedtime to us */
 	if (nla_put_u32(skb, TCA_PIE_TARGET,
-			((u32)PSCHED_TICKS2NS(q->params.target)) /
+			((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
			NSEC_PER_USEC) ||
-	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
+	    nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
-			jiffies_to_usecs(q->params.tupdate)) ||
-	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
-	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
+			jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
+	    nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
+	    nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
-	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
+	    nla_put_u32(skb, TCA_PIE_BYTEMODE,
+			READ_ONCE(q->params.bytemode)) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
-			q->params.dq_rate_estimator))
+			READ_ONCE(q->params.dq_rate_estimator)))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
...
@@ -79,7 +79,9 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	prio = min(skb->priority, max_priority);
 
 	qdisc = &q->qdiscs[prio];
-	if (sch->q.qlen < sch->limit) {
+
+	/* sch->limit can change under us from skbprio_change() */
+	if (sch->q.qlen < READ_ONCE(sch->limit)) {
 		__skb_queue_tail(qdisc, skb);
 		qdisc_qstats_backlog_inc(sch, skb);
 		q->qstats[prio].backlog += qdisc_pkt_len(skb);
@@ -172,7 +174,7 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
 	if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
 		return -EINVAL;
 
-	sch->limit = ctl->limit;
+	WRITE_ONCE(sch->limit, ctl->limit);
 	return 0;
 }
 
@@ -200,7 +202,7 @@ static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct tc_skbprio_qopt opt;
 
-	opt.limit = sch->limit;
+	opt.limit = READ_ONCE(sch->limit);
 
 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 		return -1;
...