Commit d02f51cb authored by Daniel Axtens's avatar Daniel Axtens Committed by Alexei Starovoitov

bpf: fix bpf_skb_adjust_net/bpf_skb_proto_xlat to deal with gso sctp skbs

SCTP GSO skbs have a gso_size of GSO_BY_FRAGS, so any unconditional
mangling of that value will result in nonsense and would corrupt the
skb later on.

Therefore, i) add two helpers skb_increase_gso_size() and
skb_decrease_gso_size() that would throw a one time warning and
bail out for such skbs and ii) refuse and return early with an
error in those BPF helpers that are affected. We do need to bail
out as early as possible from there before any changes on the
skb have been performed.

Fixes: 6578171a ("bpf: add bpf_skb_change_proto helper")
Co-authored-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Axtens <dja@axtens.net>
Cc: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 4a0c7191
...@@ -153,8 +153,15 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS. ...@@ -153,8 +153,15 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.
Therefore, any code in the core networking stack must be aware of the Therefore, any code in the core networking stack must be aware of the
possibility that gso_size will be GSO_BY_FRAGS and handle that case possibility that gso_size will be GSO_BY_FRAGS and handle that case
appropriately. (For size checks, the skb_gso_validate_*_len family of appropriately.
helpers do this automatically.)
There are a couple of helpers to make this easier:
- For size checks, the skb_gso_validate_*_len family of helpers correctly
considers GSO_BY_FRAGS.
- For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE. set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
...@@ -4038,6 +4038,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb) ...@@ -4038,6 +4038,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
} }
/* Test whether a GSO skb is an SCTP one (SKB_GSO_SCTP type bit set).
 * Note: Should be called only if skb_is_gso(skb) is true, since gso_type
 * is only meaningful for GSO skbs.
 */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return (skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) != 0;
}
static inline void skb_gso_reset(struct sk_buff *skb) static inline void skb_gso_reset(struct sk_buff *skb)
{ {
skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_size = 0;
...@@ -4045,6 +4051,22 @@ static inline void skb_gso_reset(struct sk_buff *skb) ...@@ -4045,6 +4051,22 @@ static inline void skb_gso_reset(struct sk_buff *skb)
skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_type = 0;
} }
/* Grow gso_size by @increment (e.g. after shrinking protocol headers).
 * SCTP GSO skbs carry the magic value GSO_BY_FRAGS in gso_size, which
 * must never be arithmetically adjusted; warn once and bail out in that
 * case so the skb is not corrupted.
 */
static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (shinfo->gso_size == GSO_BY_FRAGS) {
		WARN_ON_ONCE(1);
		return;
	}
	shinfo->gso_size += increment;
}
/* Shrink gso_size by @decrement (e.g. after growing protocol headers).
 * SCTP GSO skbs carry the magic value GSO_BY_FRAGS in gso_size, which
 * must never be arithmetically adjusted; warn once and bail out in that
 * case so the skb is not corrupted.
 */
static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (shinfo->gso_size == GSO_BY_FRAGS) {
		WARN_ON_ONCE(1);
		return;
	}
	shinfo->gso_size -= decrement;
}
void __skb_warn_lro_forwarding(const struct sk_buff *skb); void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb) static inline bool skb_warn_if_lro(const struct sk_buff *skb)
......
...@@ -2087,6 +2087,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) ...@@ -2087,6 +2087,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb); u32 off = skb_mac_header_len(skb);
int ret; int ret;
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff); ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
...@@ -2096,19 +2100,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) ...@@ -2096,19 +2100,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
return ret; return ret;
if (skb_is_gso(skb)) { if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* SKB_GSO_TCPV4 needs to be changed into /* SKB_GSO_TCPV4 needs to be changed into
* SKB_GSO_TCPV6. * SKB_GSO_TCPV6.
*/ */
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { if (shinfo->gso_type & SKB_GSO_TCPV4) {
skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; shinfo->gso_type &= ~SKB_GSO_TCPV4;
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; shinfo->gso_type |= SKB_GSO_TCPV6;
} }
/* Due to IPv6 header, MSS needs to be downgraded. */ /* Due to IPv6 header, MSS needs to be downgraded. */
skb_shinfo(skb)->gso_size -= len_diff; skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */ /* Header must be checked, and gso_segs recomputed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; shinfo->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0; shinfo->gso_segs = 0;
} }
skb->protocol = htons(ETH_P_IPV6); skb->protocol = htons(ETH_P_IPV6);
...@@ -2123,6 +2129,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) ...@@ -2123,6 +2129,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb); u32 off = skb_mac_header_len(skb);
int ret; int ret;
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC); ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
...@@ -2132,19 +2142,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) ...@@ -2132,19 +2142,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
return ret; return ret;
if (skb_is_gso(skb)) { if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* SKB_GSO_TCPV6 needs to be changed into /* SKB_GSO_TCPV6 needs to be changed into
* SKB_GSO_TCPV4. * SKB_GSO_TCPV4.
*/ */
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { if (shinfo->gso_type & SKB_GSO_TCPV6) {
skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; shinfo->gso_type &= ~SKB_GSO_TCPV6;
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; shinfo->gso_type |= SKB_GSO_TCPV4;
} }
/* Due to IPv4 header, MSS can be upgraded. */ /* Due to IPv4 header, MSS can be upgraded. */
skb_shinfo(skb)->gso_size += len_diff; skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */ /* Header must be checked, and gso_segs recomputed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; shinfo->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0; shinfo->gso_segs = 0;
} }
skb->protocol = htons(ETH_P_IP); skb->protocol = htons(ETH_P_IP);
...@@ -2243,6 +2255,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) ...@@ -2243,6 +2255,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret; int ret;
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff); ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
...@@ -2252,11 +2268,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) ...@@ -2252,11 +2268,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
return ret; return ret;
if (skb_is_gso(skb)) { if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* Due to header grow, MSS needs to be downgraded. */ /* Due to header grow, MSS needs to be downgraded. */
skb_shinfo(skb)->gso_size -= len_diff; skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */ /* Header must be checked, and gso_segs recomputed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; shinfo->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0; shinfo->gso_segs = 0;
} }
return 0; return 0;
...@@ -2267,6 +2285,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) ...@@ -2267,6 +2285,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret; int ret;
/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC); ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
...@@ -2276,11 +2298,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) ...@@ -2276,11 +2298,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
return ret; return ret;
if (skb_is_gso(skb)) { if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* Due to header shrink, MSS can be upgraded. */ /* Due to header shrink, MSS can be upgraded. */
skb_shinfo(skb)->gso_size += len_diff; skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */ /* Header must be checked, and gso_segs recomputed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; shinfo->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0; shinfo->gso_segs = 0;
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment