Commit e578d9c0 authored by Florian Westphal, committed by David S. Miller

net: sched: use counter to break reclassify loops

Seems all we want here is to avoid an endless 'goto reclassify' loop.
tc_classify_compat even resets this counter when something other
than TC_ACT_RECLASSIFY is returned, so this skb counter doesn't
break hypothetical loops induced by something other than perpetual
TC_ACT_RECLASSIFY return values.

skb_act_clone is now identical to skb_clone, so just use that.

Tested with the following (bogus) filter:
tc filter add dev eth0 parent ffff: \
 protocol ip u32 match u32 0 0 police rate 10Kbit burst \
 64000 mtu 1500 action reclassify
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b04096ff
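Before the diff, here is a small standalone illustration of the idea described in the commit message: bound the reclassify retry with a plain local counter instead of a count packed into skb->tc_verd. This is a hedged userspace sketch, not kernel code; every DEMO_* identifier is invented for the demo, and DEMO_MAX_REC_LOOP only stands in for the kernel's MAX_REC_LOOP constant.

/* Minimal userspace sketch (not kernel code): break a reclassify
 * loop with a local counter, mirroring what the patch does in
 * tc_classify().  All DEMO_* names are invented for this demo.
 */
#include <stdio.h>

#define DEMO_ACT_OK          0
#define DEMO_ACT_RECLASSIFY  1
#define DEMO_ACT_SHOT        2
#define DEMO_MAX_REC_LOOP    4  /* stand-in for the kernel's MAX_REC_LOOP */

/* A deliberately bogus classifier that always asks to reclassify,
 * like the "action reclassify" test filter in the changelog. */
static int demo_classify_once(int pkt)
{
        (void)pkt;
        return DEMO_ACT_RECLASSIFY;
}

static int demo_classify(int pkt)
{
        int limit = 0;  /* local counter; nothing is stored in the packet */
        int err;

reclassify:
        err = demo_classify_once(pkt);
        if (err == DEMO_ACT_RECLASSIFY) {
                if (limit++ >= DEMO_MAX_REC_LOOP) {
                        fprintf(stderr, "packet reclassify loop, dropping\n");
                        return DEMO_ACT_SHOT;
                }
                goto reclassify;
        }
        return err;
}

int main(void)
{
        /* Prints "verdict: 2" (DEMO_ACT_SHOT) once the loop is cut off. */
        printf("verdict: %d\n", demo_classify(1));
        return 0;
}

The pre-patch code instead kept this count in a few bits of skb->tc_verd and cleared it in tc_classify_compat() whenever something other than TC_ACT_RECLASSIFY was returned, which is why it could only ever catch back-to-back TC_ACT_RECLASSIFY verdicts.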
@@ -8,10 +8,6 @@ For example if your action queues a packet to be processed later,
 or intentionally branches by redirecting a packet, then you need to
 clone the packet.

-There are certain fields in the skb tc_verd that need to be reset so we
-avoid loops, etc. A few are generic enough that skb_act_clone()
-resets them for you, so invoke skb_act_clone() rather than skb_clone().
-
 2) If you munge any packet thou shalt call pskb_expand_head in the case
 someone else is referencing the skb. After that you "own" the skb.
@@ -739,21 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
 	return rtab->data[slot];
 }

-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
-					    int action)
-{
-	struct sk_buff *n;
-
-	n = skb_clone(skb, gfp_mask);
-
-	if (n) {
-		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
-	}
-	return n;
-}
-#endif
-
 struct psched_ratecfg {
 	u64	rate_bytes_ps; /* bytes per second */
 	u32	mult;
@@ -44,13 +44,13 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
 #define TC_OK2MUNGE _TC_MAKEMASK1(1)
 #define SET_TC_OK2MUNGE(v) ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
 #define CLR_TC_OK2MUNGE(v) ( v & ~TC_OK2MUNGE)
-#endif

 #define S_TC_VERD _TC_MAKE32(2)
 #define M_TC_VERD _TC_MAKEMASK(4,S_TC_VERD)
 #define G_TC_VERD(x) _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
 #define V_TC_VERD(x) _TC_MAKEVALUE(x,S_TC_VERD)
 #define SET_TC_VERD(v,n) ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
+#endif

 #define S_TC_FROM _TC_MAKE32(6)
 #define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM)
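For context on the hunk above: S_TC_VERD/M_TC_VERD carve a four-bit field out of skb->tc_verd (bits 2..5), and that field is what the pre-patch code used as the reclassify count. The sketch below is a userspace re-implementation of those helpers purely for illustration; the S_TC_VERD group mirrors the macros shown in the hunk, while the underlying _TC_MAKE* helpers are reproduced from the same header from memory and may differ in exact spelling.

/* Userspace re-implementation of the tc_verd bitfield helpers, for
 * illustration only.  The verdict/reclassify count occupies bits
 * 2..5 of tc_verd.
 */
#include <stdio.h>
#include <stdint.h>

#define _TC_MAKE32(x)         ((x))
#define _TC_MAKEMASK(v, n)    (_TC_MAKE32((_TC_MAKE32(1) << (v)) - 1) << _TC_MAKE32(n))
#define _TC_MAKEVALUE(v, n)   (_TC_MAKE32(v) << _TC_MAKE32(n))
#define _TC_GETVALUE(v, n, m) ((_TC_MAKE32(v) & _TC_MAKE32(m)) >> _TC_MAKE32(n))

#define S_TC_VERD           _TC_MAKE32(2)
#define M_TC_VERD           _TC_MAKEMASK(4, S_TC_VERD)
#define G_TC_VERD(x)        _TC_GETVALUE(x, S_TC_VERD, M_TC_VERD)
#define V_TC_VERD(x)        _TC_MAKEVALUE(x, S_TC_VERD)
#define SET_TC_VERD(v, n)   ((V_TC_VERD(n)) | ((v) & ~M_TC_VERD))

int main(void)
{
        uint16_t tc_verd = 0;
        unsigned int verd;

        /* Roughly what the pre-patch tc_classify() did on each pass: */
        verd = G_TC_VERD(tc_verd);                /* read count from bits 2..5 */
        tc_verd = SET_TC_VERD(tc_verd, verd + 1); /* write it back             */

        printf("mask=0x%x count=%u\n",
               (unsigned int)M_TC_VERD, (unsigned int)G_TC_VERD(tc_verd));
        /* prints: mask=0x3c count=1 */
        return 0;
}

Because the field is only four bits wide and was cleared whenever tc_classify_compat() returned anything other than TC_ACT_RECLASSIFY, it is a much weaker loop breaker than the plain local counter the patch switches to.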
@@ -151,7 +151,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 	}

 	at = G_TC_AT(skb->tc_verd);
-	skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
+	skb2 = skb_clone(skb, GFP_ATOMIC);
 	if (skb2 == NULL)
 		goto out;
@@ -1816,13 +1816,8 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
 			continue;

 		err = tp->classify(skb, tp, res);
-		if (err >= 0) {
-#ifdef CONFIG_NET_CLS_ACT
-			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
-				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
-#endif
+		if (err >= 0)
 			return err;
-		}
 	}
 	return -1;
 }
@@ -1834,23 +1829,22 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	int err = 0;
 #ifdef CONFIG_NET_CLS_ACT
 	const struct tcf_proto *otp = tp;
+	int limit = 0;
 reclassify:
 #endif

 	err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
 	if (err == TC_ACT_RECLASSIFY) {
-		u32 verd = G_TC_VERD(skb->tc_verd);
 		tp = otp;

-		if (verd++ >= MAX_REC_LOOP) {
+		if (unlikely(limit++ >= MAX_REC_LOOP)) {
 			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
 					       tp->q->ops->id,
 					       tp->prio & 0xffff,
 					       ntohs(tp->protocol));
 			return TC_ACT_SHOT;
 		}
-		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
 		goto reclassify;
 	}
 #endif