Commit 8f752224 authored by David S. Miller

Merge branch 'net-sched-summer-cleanup-part-1-mainly-in-exts-area'

Jiri Pirko says:

====================
net: sched: summer cleanup part 1, mainly in exts area

This patchset is one of a couple of cleanup patchsets I have in the queue.
The motivation, aside from the obvious need to "make things nicer", is
to prepare for the introduction of shared filter blocks. That requires
removing tp->q, and therefore removing all tp->q users.

Patch 1 is just a small thing I spotted along the way
Patch 2 removes one user of tp->q, namely tcf_em_tree_change
Patches 3-8 prepare for the removal of exts->nr_actions
Patches 9-10 do simple renames of functions in cls*
Patches 11-19 remove unnecessary calls of the tcf_exts_change helper
The last patch changes tcf_exts_change so it no longer takes the lock

Tested with tools/testing/selftests/tc-testing

v1->v2:
- removed the conversion of the action array to a list, as noted by Cong
- added the last patch instead
- small rebases of patches 11-19
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 967b2e2a 9b0d4446
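For quick orientation before the diff: the driver-visible part of the exts cleanup is that the old tc_no_actions()/tc_single_action() macros are replaced by the tcf_exts_has_actions()/tcf_exts_has_one_action() inline helpers added below. A minimal sketch of how a classifier-offload driver's checks change with this series (the surrounding function name is hypothetical, shown only to illustrate the rename; the return codes match what the converted drivers use):

    /* Sketch only: before/after of the driver-side action checks.
     * example_parse_actions() is an illustrative name, not from a real driver.
     */
    static int example_parse_actions(struct tcf_exts *exts)
    {
        /* before this series: if (tc_no_actions(exts)) */
        if (!tcf_exts_has_actions(exts))
            return -EINVAL;         /* no actions configured */

        /* drivers that can offload only a single action;
         * before this series: if (!tc_single_action(exts))
         */
        if (!tcf_exts_has_one_action(exts))
            return -EOPNOTSUPP;     /* more than one action attached */

        return 0;
    }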
@@ -96,7 +96,7 @@ static int fill_action_fields(struct adapter *adap,
 LIST_HEAD(actions);
 exts = cls->knode.exts;
-if (tc_no_actions(exts))
+if (!tcf_exts_has_actions(exts))
 return -EINVAL;
 tcf_exts_to_list(exts, &actions);
...
@@ -8953,7 +8953,7 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 LIST_HEAD(actions);
 int err;
-if (tc_no_actions(exts))
+if (!tcf_exts_has_actions(exts))
 return -EINVAL;
 tcf_exts_to_list(exts, &actions);
...
@@ -1326,7 +1326,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 LIST_HEAD(actions);
 int err;
-if (tc_no_actions(exts))
+if (!tcf_exts_has_actions(exts))
 return -EINVAL;
 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
@@ -1839,7 +1839,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 bool encap = false;
 int err = 0;
-if (tc_no_actions(exts))
+if (!tcf_exts_has_actions(exts))
 return -EINVAL;
 memset(attr, 0, sizeof(*attr));
...
@@ -1626,7 +1626,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 LIST_HEAD(actions);
 int err;
-if (!tc_single_action(cls->exts)) {
+if (!tcf_exts_has_one_action(cls->exts)) {
 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
 return -EOPNOTSUPP;
 }
...
@@ -53,7 +53,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 LIST_HEAD(actions);
 int err;
-if (tc_no_actions(exts))
+if (!tcf_exts_has_actions(exts))
 return 0;
 /* Count action is inserted first */
...
@@ -115,14 +115,14 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
 /* TC direct action */
 if (cls_bpf->exts_integrated) {
-if (tc_no_actions(cls_bpf->exts))
+if (!tcf_exts_has_actions(cls_bpf->exts))
 return NN_ACT_DIRECT;
 return -EOPNOTSUPP;
 }
 /* TC legacy mode */
-if (!tc_single_action(cls_bpf->exts))
+if (!tcf_exts_has_one_action(cls_bpf->exts))
 return -EOPNOTSUPP;
 tcf_exts_to_list(cls_bpf->exts, &actions);
...
@@ -113,36 +113,6 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
 return 0;
 }
-/**
- * tcf_exts_is_predicative - check if a predicative extension is present
- * @exts: tc filter extensions handle
- *
- * Returns 1 if a predicative extension is present, i.e. an extension which
- * might cause further actions and thus overrule the regular tcf_result.
- */
-static inline int
-tcf_exts_is_predicative(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
-return exts->nr_actions;
-#else
-return 0;
-#endif
-}
-/**
- * tcf_exts_is_available - check if at least one extension is present
- * @exts: tc filter extensions handle
- *
- * Returns 1 if at least one extension is present.
- */
-static inline int
-tcf_exts_is_available(struct tcf_exts *exts)
-{
-/* All non-predicative extensions must be added here. */
-return tcf_exts_is_predicative(exts);
-}
 static inline void tcf_exts_to_list(const struct tcf_exts *exts,
 struct list_head *actions)
 {
@@ -176,47 +146,62 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
 #endif
 }
+/**
+ * tcf_exts_has_actions - check if at least one action is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns true if at least one action is present.
+ */
+static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+return exts->nr_actions;
+#else
+return false;
+#endif
+}
+/**
+ * tcf_exts_has_one_action - check if exactly one action is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+return exts->nr_actions == 1;
+#else
+return false;
+#endif
+}
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
  * @exts: tc filter extensions handle
  * @res: desired result
  *
- * Executes all configured extensions. Returns 0 on a normal execution,
+ * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
  * a negative number if the filter must be considered unmatched or
  * a positive action code (TC_ACT_*) which must be returned to the
  * underlying layer.
  */
 static inline int
 tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
 struct tcf_result *res)
 {
 #ifdef CONFIG_NET_CLS_ACT
-if (exts->nr_actions)
-return tcf_action_exec(skb, exts->actions, exts->nr_actions,
-res);
+return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
 #endif
-return 0;
+return TC_ACT_OK;
 }
-#ifdef CONFIG_NET_CLS_ACT
-#define tc_no_actions(_exts) ((_exts)->nr_actions == 0)
-#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
-#else /* CONFIG_NET_CLS_ACT */
-#define tc_no_actions(_exts) true
-#define tc_single_action(_exts) false
-#endif /* CONFIG_NET_CLS_ACT */
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
 struct nlattr **tb, struct nlattr *rate_tlv,
 struct tcf_exts *exts, bool ovr);
 void tcf_exts_destroy(struct tcf_exts *exts);
-void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
-struct tcf_exts *src);
+void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
 int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
@@ -332,26 +317,6 @@ int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
 int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
 struct tcf_pkt_info *);
-/**
- * tcf_em_tree_change - replace ematch tree of a running classifier
- *
- * @tp: classifier kind handle
- * @dst: destination ematch tree variable
- * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
- *
- * This functions replaces the ematch tree in @dst with the ematch
- * tree in @src. The classifier in charge of the ematch tree may be
- * running.
- */
-static inline void tcf_em_tree_change(struct tcf_proto *tp,
-struct tcf_ematch_tree *dst,
-struct tcf_ematch_tree *src)
-{
-tcf_tree_lock(tp);
-memcpy(dst, src, sizeof(*dst));
-tcf_tree_unlock(tp);
-}
 /**
  * tcf_em_tree_match - evaulate an ematch tree
  *
@@ -386,7 +351,6 @@ struct tcf_ematch_tree {
 #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
 #define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
 #define tcf_em_tree_dump(skb, t, tlv) (0)
-#define tcf_em_tree_change(tp, dst, src) do { } while(0)
 #define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
 #endif /* CONFIG_NET_EMATCH */
...
@@ -779,7 +779,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 if (!ds->ops->port_mirror_add)
 return err;
-if (!tc_single_action(cls->exts))
+if (!tcf_exts_has_one_action(cls->exts))
 return err;
 tcf_exts_to_list(cls->exts, &actions);
...
@@ -473,9 +473,10 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
 int nr_actions, struct tcf_result *res)
 {
-int ret = -1, i;
 u32 jmp_prgcnt = 0;
 u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
+int i;
+int ret = TC_ACT_OK;
 if (skb_skip_tc_classify(skb))
 return TC_ACT_OK;
...
@@ -883,18 +883,12 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 }
 EXPORT_SYMBOL(tcf_exts_validate);
-void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
-struct tcf_exts *src)
+void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
 struct tcf_exts old = *dst;
-tcf_tree_lock(tp);
-dst->nr_actions = src->nr_actions;
-dst->actions = src->actions;
-dst->type = src->type;
-tcf_tree_unlock(tp);
+*dst = *src;
 tcf_exts_destroy(&old);
 #endif
 }
@@ -915,7 +909,7 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 #ifdef CONFIG_NET_CLS_ACT
 struct nlattr *nest;
-if (exts->action && exts->nr_actions) {
+if (exts->action && tcf_exts_has_actions(exts)) {
 /*
 * again for backward compatible mode - we want
 * to work with both old and new modes of entering
@@ -972,7 +966,7 @@ int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
 const struct tc_action *a;
 LIST_HEAD(actions);
-if (tc_no_actions(exts))
+if (!tcf_exts_has_actions(exts))
 return -EINVAL;
 tcf_exts_to_list(exts, &actions);
...
@@ -129,33 +129,22 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
 struct nlattr *est, bool ovr)
 {
 int err;
-struct tcf_exts e;
-struct tcf_ematch_tree t;
-err = tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
+err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
 if (err < 0)
 return err;
-err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
-if (err < 0)
-goto errout;
-err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t);
+err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
 if (err < 0)
-goto errout;
+return err;
 if (tb[TCA_BASIC_CLASSID]) {
 f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
 tcf_bind_filter(tp, &f->res, base);
 }
-tcf_exts_change(tp, &f->exts, &e);
-tcf_em_tree_change(tp, &f->ematches, &t);
 f->tp = tp;
 return 0;
-errout:
-tcf_exts_destroy(&e);
-return err;
 }
 static int basic_change(struct net *net, struct sk_buff *in_skb,
...
@@ -382,13 +382,11 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 return 0;
 }
-static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
-struct cls_bpf_prog *prog,
-unsigned long base, struct nlattr **tb,
-struct nlattr *est, bool ovr)
+static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
+struct cls_bpf_prog *prog, unsigned long base,
+struct nlattr **tb, struct nlattr *est, bool ovr)
 {
 bool is_bpf, is_ebpf, have_exts = false;
-struct tcf_exts exts;
 u32 gen_flags = 0;
 int ret;
@@ -397,30 +395,23 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
 return -EINVAL;
-ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
 if (ret < 0)
 return ret;
-ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
-if (ret < 0)
-goto errout;
 if (tb[TCA_BPF_FLAGS]) {
 u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
-if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
-ret = -EINVAL;
-goto errout;
-}
+if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
+return -EINVAL;
 have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
 }
 if (tb[TCA_BPF_FLAGS_GEN]) {
 gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
 if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
-!tc_flags_valid(gen_flags)) {
-ret = -EINVAL;
-goto errout;
-}
+!tc_flags_valid(gen_flags))
+return -EINVAL;
 }
 prog->exts_integrated = have_exts;
@@ -429,19 +420,14 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
 cls_bpf_prog_from_efd(tb, prog, tp);
 if (ret < 0)
-goto errout;
+return ret;
 if (tb[TCA_BPF_CLASSID]) {
 prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
 tcf_bind_filter(tp, &prog->res, base);
 }
-tcf_exts_change(tp, &prog->exts, &exts);
 return 0;
-errout:
-tcf_exts_destroy(&exts);
-return ret;
 }
 static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
@@ -508,8 +494,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 goto errout;
 }
-ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
-ovr);
+ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
 if (ret < 0)
 goto errout;
...
@@ -76,8 +76,6 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 struct nlattr *tb[TCA_CGROUP_MAX + 1];
 struct cls_cgroup_head *head = rtnl_dereference(tp->root);
 struct cls_cgroup_head *new;
-struct tcf_ematch_tree t;
-struct tcf_exts e;
 int err;
 if (!tca[TCA_OPTIONS])
@@ -103,23 +101,13 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 if (err < 0)
 goto errout;
-err = tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
+err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr);
 if (err < 0)
 goto errout;
-err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-if (err < 0) {
-tcf_exts_destroy(&e);
-goto errout;
-}
-err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
-if (err < 0) {
-tcf_exts_destroy(&e);
+err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
+if (err < 0)
 goto errout;
-}
-tcf_exts_change(tp, &new->exts, &e);
-tcf_em_tree_change(tp, &new->ematches, &t);
 rcu_assign_pointer(tp->root, new);
 if (head)
...
@@ -388,8 +388,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 struct flow_filter *fold, *fnew;
 struct nlattr *opt = tca[TCA_OPTIONS];
 struct nlattr *tb[TCA_FLOW_MAX + 1];
-struct tcf_exts e;
-struct tcf_ematch_tree t;
 unsigned int nkeys = 0;
 unsigned int perturb_period = 0;
 u32 baseclass = 0;
@@ -425,31 +423,27 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 return -EOPNOTSUPP;
 }
-err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-if (err < 0)
-goto err1;
-err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-if (err < 0)
-goto err1;
+fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+if (!fnew)
+return -ENOBUFS;
-err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
+err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
 if (err < 0)
 goto err1;
-err = -ENOBUFS;
-fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
-if (!fnew)
+err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+if (err < 0)
 goto err2;
-err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-if (err < 0)
-goto err3;
+err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+if (err < 0)
+goto err2;
 fold = (struct flow_filter *)*arg;
 if (fold) {
 err = -EINVAL;
 if (fold->handle != handle && handle)
-goto err3;
+goto err2;
 /* Copy fold into fnew */
 fnew->tp = fold->tp;
@@ -469,31 +463,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 if (tb[TCA_FLOW_MODE])
 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 if (mode != FLOW_MODE_HASH && nkeys > 1)
-goto err3;
+goto err2;
 if (mode == FLOW_MODE_HASH)
 perturb_period = fold->perturb_period;
 if (tb[TCA_FLOW_PERTURB]) {
 if (mode != FLOW_MODE_HASH)
-goto err3;
+goto err2;
 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 }
 } else {
 err = -EINVAL;
 if (!handle)
-goto err3;
+goto err2;
 if (!tb[TCA_FLOW_KEYS])
-goto err3;
+goto err2;
 mode = FLOW_MODE_MAP;
 if (tb[TCA_FLOW_MODE])
 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 if (mode != FLOW_MODE_HASH && nkeys > 1)
-goto err3;
+goto err2;
 if (tb[TCA_FLOW_PERTURB]) {
 if (mode != FLOW_MODE_HASH)
-goto err3;
+goto err2;
 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 }
@@ -511,9 +505,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
 (unsigned long)fnew);
-tcf_exts_change(tp, &fnew->exts, &e);
-tcf_em_tree_change(tp, &fnew->ematches, &t);
 netif_keep_dst(qdisc_dev(tp->q));
 if (tb[TCA_FLOW_KEYS]) {
@@ -552,13 +543,11 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 call_rcu(&fold->rcu, flow_destroy_filter);
 return 0;
-err3:
-tcf_exts_destroy(&fnew->exts);
 err2:
-tcf_em_tree_destroy(&t);
-kfree(fnew);
+tcf_exts_destroy(&fnew->exts);
+tcf_em_tree_destroy(&fnew->ematches);
 err1:
-tcf_exts_destroy(&e);
+kfree(fnew);
 return err;
 }
...
@@ -852,15 +852,11 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
 unsigned long base, struct nlattr **tb,
 struct nlattr *est, bool ovr)
 {
-struct tcf_exts e;
 int err;
-err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
+err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
 if (err < 0)
 return err;
-err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
-if (err < 0)
-goto errout;
 if (tb[TCA_FLOWER_CLASSID]) {
 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
@@ -869,17 +865,12 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
 err = fl_set_key(net, tb, &f->key, &mask->key);
 if (err)
-goto errout;
+return err;
 fl_mask_update_range(mask);
 fl_set_masked_key(&f->mkey, &f->key, mask);
-tcf_exts_change(tp, &f->exts, &e);
 return 0;
-errout:
-tcf_exts_destroy(&e);
-return err;
 }
 static u32 fl_grab_new_handle(struct tcf_proto *tp,
...
@@ -190,22 +190,17 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
 [TCA_FW_MASK] = { .type = NLA_U32 },
 };
-static int
-fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
-struct nlattr **tb, struct nlattr **tca, unsigned long base,
-bool ovr)
+static int fw_set_parms(struct net *net, struct tcf_proto *tp,
+struct fw_filter *f, struct nlattr **tb,
+struct nlattr **tca, unsigned long base, bool ovr)
 {
 struct fw_head *head = rtnl_dereference(tp->root);
-struct tcf_exts e;
 u32 mask;
 int err;
-err = tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
+err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr);
 if (err < 0)
 return err;
-err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-if (err < 0)
-goto errout;
 if (tb[TCA_FW_CLASSID]) {
 f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
@@ -216,10 +211,8 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
 if (tb[TCA_FW_INDEV]) {
 int ret;
 ret = tcf_change_indev(net, tb[TCA_FW_INDEV]);
-if (ret < 0) {
-err = ret;
-goto errout;
-}
+if (ret < 0)
+return ret;
 f->ifindex = ret;
 }
 #endif /* CONFIG_NET_CLS_IND */
@@ -228,16 +221,11 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
 if (tb[TCA_FW_MASK]) {
 mask = nla_get_u32(tb[TCA_FW_MASK]);
 if (mask != head->mask)
-goto errout;
+return err;
 } else if (head->mask != 0xFFFFFFFF)
-goto errout;
+return err;
-tcf_exts_change(tp, &f->exts, &e);
 return 0;
-errout:
-tcf_exts_destroy(&e);
-return err;
 }
 static int fw_change(struct net *net, struct sk_buff *in_skb,
@@ -282,7 +270,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 return err;
 }
-err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr);
+err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr);
 if (err < 0) {
 tcf_exts_destroy(&fnew->exts);
 kfree(fnew);
@@ -330,7 +318,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 f->id = handle;
 f->tp = tp;
-err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
+err = fw_set_parms(net, tp, f, tb, tca, base, ovr);
 if (err < 0)
 goto errout;
@@ -387,7 +375,7 @@ static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 t->tcm_handle = f->id;
-if (!f->res.classid && !tcf_exts_is_available(&f->exts))
+if (!f->res.classid && !tcf_exts_has_actions(&f->exts))
 return skb->len;
 nest = nla_nest_start(skb, TCA_OPTIONS);
...
@@ -120,27 +120,17 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
 unsigned long base, struct nlattr **tb,
 struct nlattr *est, bool ovr)
 {
-struct tcf_exts e;
 int err;
-err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
-if (err)
-return err;
-err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr);
 if (err < 0)
-goto errout;
+return err;
 if (tb[TCA_MATCHALL_CLASSID]) {
 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
 tcf_bind_filter(tp, &head->res, base);
 }
-tcf_exts_change(tp, &head->exts, &e);
 return 0;
-errout:
-tcf_exts_destroy(&e);
-return err;
 }
 static int mall_change(struct net *net, struct sk_buff *in_skb,
...
@@ -113,7 +113,7 @@ static inline int route4_hash_wild(void)
 #define ROUTE4_APPLY_RESULT() \
 { \
 *res = f->res; \
-if (tcf_exts_is_available(&f->exts)) { \
+if (tcf_exts_has_actions(&f->exts)) { \
 int r = tcf_exts_exec(skb, &f->exts, res); \
 if (r < 0) { \
 dont_cache = 1; \
@@ -372,37 +372,32 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
 struct route4_filter *fp;
 unsigned int h1;
 struct route4_bucket *b;
-struct tcf_exts e;
 int err;
-err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
+err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
 if (err < 0)
 return err;
-err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
-if (err < 0)
-goto errout;
-err = -EINVAL;
 if (tb[TCA_ROUTE4_TO]) {
 if (new && handle & 0x8000)
-goto errout;
+return -EINVAL;
 to = nla_get_u32(tb[TCA_ROUTE4_TO]);
 if (to > 0xFF)
-goto errout;
+return -EINVAL;
 nhandle = to;
 }
 if (tb[TCA_ROUTE4_FROM]) {
 if (tb[TCA_ROUTE4_IIF])
-goto errout;
+return -EINVAL;
 id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
 if (id > 0xFF)
-goto errout;
+return -EINVAL;
 nhandle |= id << 16;
 } else if (tb[TCA_ROUTE4_IIF]) {
 id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
 if (id > 0x7FFF)
-goto errout;
+return -EINVAL;
 nhandle |= (id | 0x8000) << 16;
 } else
 nhandle |= 0xFFFF << 16;
@@ -410,27 +405,25 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
 if (handle && new) {
 nhandle |= handle & 0x7F00;
 if (nhandle != handle)
-goto errout;
+return -EINVAL;
 }
 h1 = to_hash(nhandle);
 b = rtnl_dereference(head->table[h1]);
 if (!b) {
-err = -ENOBUFS;
 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
 if (b == NULL)
-goto errout;
+return -ENOBUFS;
 rcu_assign_pointer(head->table[h1], b);
 } else {
 unsigned int h2 = from_hash(nhandle >> 16);
-err = -EEXIST;
 for (fp = rtnl_dereference(b->ht[h2]);
 fp;
 fp = rtnl_dereference(fp->next))
 if (fp->handle == f->handle)
-goto errout;
+return -EEXIST;
 }
 if (tb[TCA_ROUTE4_TO])
@@ -450,12 +443,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
 tcf_bind_filter(tp, &f->res, base);
 }
-tcf_exts_change(tp, &f->exts, &e);
 return 0;
-errout:
-tcf_exts_destroy(&e);
-return err;
 }
 static int route4_change(struct net *net, struct sk_buff *in_skb,
...
@@ -518,7 +518,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
 tcf_bind_filter(tp, &n->res, base);
 }
-tcf_exts_change(tp, &n->exts, &e);
+tcf_exts_change(&n->exts, &e);
 rsvp_replace(tp, n, handle);
 return 0;
 }
@@ -591,7 +591,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
 if (f->tunnelhdr == 0)
 tcf_bind_filter(tp, &f->res, base);
-tcf_exts_change(tp, &f->exts, &e);
+tcf_exts_change(&f->exts, &e);
 fp = &s->ht[h2];
 for (nfp = rtnl_dereference(*fp); nfp;
...
@@ -52,7 +52,7 @@ struct tcindex_data {
 static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
 {
-return tcf_exts_is_predicative(&r->exts) || r->res.classid;
+return tcf_exts_has_actions(&r->exts) || r->res.classid;
 }
 static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
@@ -419,9 +419,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 }
 if (old_r)
-tcf_exts_change(tp, &r->exts, &e);
+tcf_exts_change(&r->exts, &e);
 else
-tcf_exts_change(tp, &cr.exts, &e);
+tcf_exts_change(&cr.exts, &e);
 if (old_r && old_r != r) {
 err = tcindex_filter_result_init(old_r);
@@ -439,7 +439,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 struct tcindex_filter *nfp;
 struct tcindex_filter __rcu **fp;
-tcf_exts_change(tp, &f->result.exts, &r->exts);
+tcf_exts_change(&f->result.exts, &r->exts);
 fp = cp->h + (handle % cp->hash);
 for (nfp = rtnl_dereference(*fp);
...
@@ -723,29 +723,24 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 struct tc_u_knode *n, struct nlattr **tb,
 struct nlattr *est, bool ovr)
 {
-struct tcf_exts e;
 int err;
-err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
+err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr);
 if (err < 0)
 return err;
-err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
-if (err < 0)
-goto errout;
-err = -EINVAL;
 if (tb[TCA_U32_LINK]) {
 u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
 struct tc_u_hnode *ht_down = NULL, *ht_old;
 if (TC_U32_KEY(handle))
-goto errout;
+return -EINVAL;
 if (handle) {
 ht_down = u32_lookup_ht(ht->tp_c, handle);
 if (ht_down == NULL)
-goto errout;
+return -EINVAL;
 ht_down->refcnt++;
 }
@@ -765,16 +760,11 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 int ret;
 ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
 if (ret < 0)
-goto errout;
+return -EINVAL;
 n->ifindex = ret;
 }
 #endif
-tcf_exts_change(tp, &n->exts, &e);
 return 0;
-errout:
-tcf_exts_destroy(&e);
-return err;
 }
 static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
...
@@ -41,6 +41,7 @@
 #define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
 struct atm_flow_data {
+struct Qdisc_class_common common;
 struct Qdisc *q; /* FIFO, TBF, etc. */
 struct tcf_proto __rcu *filter_list;
 struct tcf_block *block;
@@ -49,7 +50,6 @@ struct atm_flow_data {
 struct sk_buff *skb); /* chaining */
 struct atm_qdisc_data *parent; /* parent qdisc */
 struct socket *sock; /* for closing */
-u32 classid; /* x:y type ID */
 int ref; /* reference count */
 struct gnet_stats_basic_packed bstats;
 struct gnet_stats_queue qstats;
@@ -75,7 +75,7 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
 struct atm_flow_data *flow;
 list_for_each_entry(flow, &p->flows, list) {
-if (flow->classid == classid)
+if (flow->common.classid == classid)
 return flow;
 }
 return NULL;
@@ -293,7 +293,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 flow->old_pop = flow->vcc->pop;
 flow->parent = p;
 flow->vcc->pop = sch_atm_pop;
-flow->classid = classid;
+flow->common.classid = classid;
 flow->ref = 1;
 flow->excess = excess;
 list_add(&flow->list, &p->link.list);
@@ -549,7 +549,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 p->link.vcc = NULL;
 p->link.sock = NULL;
-p->link.classid = sch->handle;
+p->link.common.classid = sch->handle;
 p->link.ref = 1;
 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
 return 0;
@@ -594,7 +594,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
 sch, p, flow, skb, tcm);
 if (list_empty(&flow->list))
 return -EINVAL;
-tcm->tcm_handle = flow->classid;
+tcm->tcm_handle = flow->common.classid;
 tcm->tcm_info = flow->q->handle;
 nest = nla_nest_start(skb, TCA_OPTIONS);
@@ -619,7 +619,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
 goto nla_put_failure;
 }
 if (flow->excess) {
-if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
+if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
 goto nla_put_failure;
 } else {
 if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
...