Commit 9410c940 authored by Paul Blakey, committed by Saeed Mahameed

net: sched: Introduce ingress classification function

TC multi chain configuration can cause offloaded tc chains to miss in
hardware after jumping to some chain. In such cases the software should
continue from the chain that missed in hardware, as the hardware may
have manipulated the packet and updated some counters.

Currently a single tcf classification function serves both ingress and
egress. However, multi chain miss processing (get tc skb extension on
hw miss, set tc skb extension on tc miss) should happen only on
ingress.

Refactor the code to use ingress classification function, and move setting
the tc skb extension from general classification to it, as a prestep
for supporting the hw miss scenario.
Co-developed-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 2bb07f4e
...@@ -72,6 +72,8 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) ...@@ -72,6 +72,8 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode); struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
#else #else
static inline bool tcf_block_shared(struct tcf_block *block) static inline bool tcf_block_shared(struct tcf_block *block)
...@@ -133,6 +135,14 @@ static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -133,6 +135,14 @@ static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
{ {
return TC_ACT_UNSPEC; return TC_ACT_UNSPEC;
} }
/* Stub used when TC classification support is compiled out (this sits in
 * the #else branch of the config guard — see the matching tcf_classify()
 * stub above): with no classifiers available, ingress classification
 * always returns TC_ACT_UNSPEC ("no verdict, continue normally").
 */
static inline int tcf_classify_ingress(struct sk_buff *skb,
const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
return TC_ACT_UNSPEC;
}
#endif #endif
static inline unsigned long static inline unsigned long
......
...@@ -4860,7 +4860,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, ...@@ -4860,7 +4860,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
skb->tc_at_ingress = 1; skb->tc_at_ingress = 1;
mini_qdisc_bstats_cpu_update(miniq, skb); mini_qdisc_bstats_cpu_update(miniq, skb);
switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { switch (tcf_classify_ingress(skb, miniq->filter_list, &cl_res,
false)) {
case TC_ACT_OK: case TC_ACT_OK:
case TC_ACT_RECLASSIFY: case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid); skb->tc_index = TC_H_MIN(cl_res.classid);
......
...@@ -1559,8 +1559,11 @@ static int tcf_block_setup(struct tcf_block *block, ...@@ -1559,8 +1559,11 @@ static int tcf_block_setup(struct tcf_block *block,
* to this qdisc, (optionally) tests for protocol and asks * to this qdisc, (optionally) tests for protocol and asks
* specific classifiers. * specific classifiers.
*/ */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, static inline int __tcf_classify(struct sk_buff *skb,
struct tcf_result *res, bool compat_mode) const struct tcf_proto *tp,
struct tcf_result *res,
bool compat_mode,
u32 *last_executed_chain)
{ {
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4; const int max_reclassify_loop = 4;
...@@ -1582,21 +1585,11 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -1582,21 +1585,11 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
first_tp = orig_tp; first_tp = orig_tp;
*last_executed_chain = first_tp->chain->index;
goto reset; goto reset;
} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
first_tp = res->goto_tp; first_tp = res->goto_tp;
*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
{
struct tc_skb_ext *ext;
ext = skb_ext_add(skb, TC_SKB_EXT);
if (WARN_ON_ONCE(!ext))
return TC_ACT_SHOT;
ext->chain = err & TC_ACT_EXT_VAL_MASK;
}
#endif
goto reset; goto reset;
} }
#endif #endif
...@@ -1619,8 +1612,45 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -1619,8 +1612,45 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
goto reclassify; goto reclassify;
#endif #endif
} }
/* Generic classification entry point (used on egress, and on ingress for
 * callers that don't need chain-miss bookkeeping).
 *
 * Simply delegates to __tcf_classify() with a throwaway chain tracker:
 * only the ingress path (tcf_classify_ingress) cares which chain
 * executed last, so the recorded value is discarded here.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 discarded_chain = 0;

	return __tcf_classify(skb, tp, res, compat_mode, &discarded_chain);
}
EXPORT_SYMBOL(tcf_classify); EXPORT_SYMBOL(tcf_classify);
/* Ingress-only classification entry point.
 *
 * Unlike tcf_classify(), this tracks which chain executed last so that
 * multi-chain processing can survive a miss: if classification falls
 * through (TC_ACT_UNSPEC) after a non-zero chain was reached, the chain
 * index is stored in a TC skb extension for later processing to resume
 * from (prestep for resuming after a hardware-offload miss — see the
 * commit message above).
 */
int tcf_classify_ingress(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Extension support compiled out: behave exactly like tcf_classify(). */
u32 last_executed_chain = 0;
return __tcf_classify(skb, tp, res, compat_mode,
&last_executed_chain);
#else
/* Seed with the first filter's chain index (0 when no filters). */
u32 last_executed_chain = tp ? tp->chain->index : 0;
struct tc_skb_ext *ext;
int ret;
ret = __tcf_classify(skb, tp, res, compat_mode, &last_executed_chain);
/* If we missed on some chain */
if (ret == TC_ACT_UNSPEC && last_executed_chain) {
ext = skb_ext_add(skb, TC_SKB_EXT);
if (WARN_ON_ONCE(!ext))
return TC_ACT_SHOT;
/* Record where classification stopped so it can resume there. */
ext->chain = last_executed_chain;
}
return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);
struct tcf_chain_info { struct tcf_chain_info {
struct tcf_proto __rcu **pprev; struct tcf_proto __rcu **pprev;
struct tcf_proto __rcu *next; struct tcf_proto __rcu *next;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment