Commit 19f6484f authored by David S. Miller

Merge branch 'GSO_BY_FRAGS-correctness-improvements'

Daniel Axtens says:

====================
GSO_BY_FRAGS correctness improvements

As requested [1], I went through and had a look at users of gso_size to
see if there were things that need to be fixed to consider
GSO_BY_FRAGS, and I have tried to improve our helper functions to deal
with this case.

I found a few. This fixes bugs relating to the use of
skb_gso_*_seglen() where GSO_BY_FRAGS is not considered.

Patch 1 renames skb_gso_validate_mtu to skb_gso_validate_network_len.
This is a follow-up to my earlier patch 2b16f048 ("net: create
skb_gso_validate_mac_len()"), and just makes everything a bit clearer.

Patches 2 and 3 replace the final users of skb_gso_network_seglen() -
which doesn't consider GSO_BY_FRAGS - with
skb_gso_validate_network_len(), which does. This allows me to make the
skb_gso_*_seglen functions private in patch 4 - now future users won't
accidentally do the wrong comparison.
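
For reference, the distinction matters because SCTP GSO skbs carry the
sentinel GSO_BY_FRAGS (0xffff) in gso_size rather than a real segment
size, so a plain "headers + gso_size > mtu" comparison is meaningless
for them. The validate helpers instead go through a GSO_BY_FRAGS-aware
size check. Below is a minimal sketch of that check, not the actual
skb_gso_size_check() code: gso_fits and its hdr_len parameter are
illustrative only, and it relies on definitions from <linux/skbuff.h>.

    /* Sketch of the shape of the check behind skb_gso_validate_*_len().
     * hdr_len is assumed to be the per-segment header length the caller
     * cares about (L3+L4, or L2+L3+L4).
     */
    static bool gso_fits(const struct sk_buff *skb, unsigned int hdr_len,
                         unsigned int max_len)
    {
            const struct skb_shared_info *shinfo = skb_shinfo(skb);
            const struct sk_buff *iter;

            if (shinfo->gso_size != GSO_BY_FRAGS)
                    /* ordinary GSO: every segment is hdr_len + gso_size */
                    return hdr_len + shinfo->gso_size <= max_len;

            /* GSO_BY_FRAGS (SCTP): segment payloads live in the frag
             * list, so each fragment has to fit individually.
             */
            skb_walk_frags(skb, iter)
                    if (hdr_len + skb_headlen(iter) > max_len)
                            return false;

            return true;
    }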

Two things remain. One is qdisc_pkt_len_init, which is discussed at
[2] - it's caught up in the GSO_DODGY mess. I don't have any expertise
in GSO_DODGY, and it looks like a good clean fix will involve
unpicking the whole validation mess, so I have left it for now.

Secondly, there are 3 eBPF opcodes that change the gso_size of an SKB
and don't consider GSO_BY_FRAGS. This is going through the bpf tree.
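
To make that hazard concrete, a sketch only and not the actual
bpf-tree fix: code that grows or shrinks gso_size needs a guard along
these lines, because for GSO_BY_FRAGS skbs the field is a sentinel, not
a byte count.

    /* Illustrative guard, assuming skb is about to have its gso_size
     * adjusted; the real bpf patches may differ.
     */
    if (skb_is_gso(skb) &&
        skb_shinfo(skb)->gso_size == GSO_BY_FRAGS)
            return -ENOTSUPP;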

Regards,
Daniel

[1] https://patchwork.ozlabs.org/comment/1852414/
[2] https://www.spinics.net/lists/netdev/msg482397.html

PS: This is all in the core networking stack. For a driver to be
affected by this, it would need to support NETIF_F_GSO_SCTP /
NETIF_F_GSO_SOFTWARE and then either use gso_size or not be a purely
virtual device. (Many drivers look at gso_size, but do not support
SCTP segmentation, so the core networking stack will segment an SCTP GSO skb before
it hits them.) Based on that, the only driver that may be affected is
sunvnet, but I have no way of testing it, so I haven't looked at it.

v2: split out bpf stuff
    address review comments from Dave Miller
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4e00f5d5 a4a77718
@@ -3285,8 +3285,7 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
@@ -4104,38 +4103,6 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
 	return !skb->head_frag || skb_cloned(skb);
 }
 
-/**
- * skb_gso_network_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_network_seglen is used to determine the real size of the
- * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
- *
- * The MAC/L2 header is not accounted for.
- */
-static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) -
-			       skb_network_header(skb);
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
-/**
- * skb_gso_mac_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_mac_seglen is used to determine the real size of the
- * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
- * headers (TCP/UDP).
- */
-static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
 /* Local Checksum Offload.
  * Compute outer checksum based on the assumption that the
  * inner checksum will be offloaded later.
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
  *
  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
  */
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
 	unsigned int thlen = 0;
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 	 */
 	return thlen + shinfo->gso_size;
 }
-EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) -
+			       skb_network_header(skb);
+
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
+
+/**
+ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_mac_seglen is used to determine the real size of the
+ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
+ * headers (TCP/UDP).
+ */
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
 
 /**
  * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb,
 }
 
 /**
- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
+ * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
  *
  * @skb: GSO skb
  * @mtu: MTU to validate against
  *
- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
- * once split.
+ * skb_gso_validate_network_len validates if a given skb will fit a
+ * wanted MTU once split. It considers L3 headers, L4 headers, and the
+ * payload.
  */
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
 {
 	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
 }
-EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
 
 /**
  * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;
 
 	return true;
@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 	/* common case: seglen is <= mtu
 	 */
-	if (skb_gso_validate_mtu(skb, mtu))
+	if (skb_gso_validate_network_len(skb, mtu))
 		return ip_finish_output2(net, sk, skb);
 
 	/* Slowpath - GSO segment length exceeds the egress MTU.
@@ -186,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;
 
 	return true;
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	mtu = dst_mtu(skb_dst(skb));
 	if ((!skb_is_gso(skb) && skb->len > mtu) ||
-	    (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) {
+	    (skb_is_gso(skb) &&
+	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
 		skb->protocol = htons(ETH_P_IP);
 
 		if (skb->sk)
@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;
 
 	return true;
@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;
 
 	return true;
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 	if ((!skb_is_gso(skb) && skb->len > mtu) ||
 	    (skb_is_gso(skb) &&
-	     skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) {
+	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
 		skb->dev = dst->dev;
 		skb->protocol = htons(ETH_P_IPV6);
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;
 
 	return true;
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
+		if (skb_is_gso(skb) &&
+		    skb_gso_validate_mac_len(skb, q->max_size))
 			return tbf_segment(skb, sch, to_free);
 		return qdisc_drop(skb, sch, to_free);
 	}
@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 		if (skb->len <= mtu)
 			goto ok;
 
-		if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 			goto ok;
 	}