Commit 4372339e authored by Jason A. Donenfeld, committed by David S. Miller

net: always use icmp{,v6}_ndo_send from ndo_start_xmit

There were a few remaining tunnel drivers that didn't receive the prior
conversion to icmp{,v6}_ndo_send. Knowing now that this could lead to
memory corruption (see ee576c47 ("net: icmp: pass zeroed opts from
icmp{,v6}_ndo_send before sending") for details), it is all the more
imperative to have these all converted. So this commit goes through the
remaining cases that I could find and does a boring translation to the
ndo variety.
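
For reference, the difference between the two helpers is what makes
this safe. Below is a minimal sketch of icmp_ndo_send(), assuming
CONFIG_NF_NAT is disabled (the NAT-enabled build additionally restores
a SNATed source address before sending; exact code varies by kernel
version):

    /* icmp_send() pulls IP options out of IPCB(skb), which is only
     * meaningful on the receive path; at ndo_start_xmit time that
     * control block holds stale transmit-path data. The ndo variant
     * instead hands __icmp_send() an explicitly zeroed options struct.
     */
    void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
    {
        struct ip_options opts = { 0 };

        __icmp_send(skb_in, type, code, info, &opts);
    }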

The Fixes: line below refers to the merge that originally added
icmp{,v6}_ndo_send and converted the first batch of icmp{,v6}_send
users. The rationale for that change applies equally to this patch.
These drivers were simply left out of the initial conversion because
these network devices are hiding in net/ rather than in drivers/net/.

Cc: Florian Westphal <fw@strlen.de>
Cc: Willem de Bruijn <willemb@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
Cc: David Ahern <dsahern@kernel.org>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Fixes: 803381f9 ("Merge branch 'icmp-account-for-NAT-when-sending-icmps-from-ndo-layer'")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9eb8bc59
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -502,8 +502,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
         if (!skb_is_gso(skb) &&
             (inner_iph->frag_off & htons(IP_DF)) &&
             mtu < pkt_size) {
-            memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+            icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
             return -E2BIG;
         }
     }
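
(Note: this first hunk also drops the open-coded memset of IPCB(skb).
That memset was this call site's local attempt at the same fix, and it
becomes redundant once icmp_ndo_send() zeroes the options itself.)
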
@@ -527,7 +526,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
         if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
             mtu < pkt_size) {
-            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+            icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
             return -E2BIG;
         }
     }
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -238,13 +238,13 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
     if (skb->len > mtu) {
         skb_dst_update_pmtu_no_confirm(skb, mtu);
         if (skb->protocol == htons(ETH_P_IP)) {
-            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-                      htonl(mtu));
+            icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                          htonl(mtu));
         } else {
             if (mtu < IPV6_MIN_MTU)
                 mtu = IPV6_MIN_MTU;
-            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+            icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
         }
         dst_release(dst);
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -678,8 +678,8 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
     tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
     if (tel->encap_limit == 0) {
-        icmpv6_send(skb, ICMPV6_PARAMPROB,
-                    ICMPV6_HDR_FIELD, offset + 2);
+        icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
+                        ICMPV6_HDR_FIELD, offset + 2);
         return -1;
     }
     *encap_limit = tel->encap_limit - 1;
@@ -805,8 +805,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
     if (err != 0) {
         /* XXX: send ICMP error even if DF is not set. */
         if (err == -EMSGSIZE)
-            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-                      htonl(mtu));
+            icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                          htonl(mtu));
         return -1;
     }
...@@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) ...@@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
&mtu, skb->protocol); &mtu, skb->protocol);
if (err != 0) { if (err != 0) {
if (err == -EMSGSIZE) if (err == -EMSGSIZE)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
return -1; return -1;
} }
@@ -1063,10 +1063,10 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
         /* XXX: send ICMP error even if DF is not set. */
         if (err == -EMSGSIZE) {
             if (skb->protocol == htons(ETH_P_IP))
-                icmp_send(skb, ICMP_DEST_UNREACH,
-                          ICMP_FRAG_NEEDED, htonl(mtu));
+                icmp_ndo_send(skb, ICMP_DEST_UNREACH,
+                              ICMP_FRAG_NEEDED, htonl(mtu));
             else
-                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+                icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
         }
         goto tx_err;
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1332,8 +1332,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
     tel = (void *)&skb_network_header(skb)[offset];
     if (tel->encap_limit == 0) {
-        icmpv6_send(skb, ICMPV6_PARAMPROB,
-                    ICMPV6_HDR_FIELD, offset + 2);
+        icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
+                        ICMPV6_HDR_FIELD, offset + 2);
         return -1;
     }
     encap_limit = tel->encap_limit - 1;
@@ -1385,11 +1385,11 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
     if (err == -EMSGSIZE)
         switch (protocol) {
         case IPPROTO_IPIP:
-            icmp_send(skb, ICMP_DEST_UNREACH,
-                      ICMP_FRAG_NEEDED, htonl(mtu));
+            icmp_ndo_send(skb, ICMP_DEST_UNREACH,
+                          ICMP_FRAG_NEEDED, htonl(mtu));
             break;
         case IPPROTO_IPV6:
-            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+            icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
             break;
         default:
             break;
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -521,10 +521,10 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
             if (mtu < IPV6_MIN_MTU)
                 mtu = IPV6_MIN_MTU;
-            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+            icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
         } else {
-            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-                      htonl(mtu));
+            icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                          htonl(mtu));
         }
         err = -EMSGSIZE;
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -987,7 +987,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
         skb_dst_update_pmtu_no_confirm(skb, mtu);
         if (skb->len > mtu && !skb_is_gso(skb)) {
-            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+            icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
             ip_rt_put(rt);
             goto tx_error;
         }
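
Every hunk above follows the same mechanical pattern, so any remaining
out-of-tree tunnel driver that emits ICMP errors from its ndo_start_xmit
path can be converted the same way; the ndo helpers take exactly the
same arguments as the functions they replace:

    /* before: unsafe when called from ndo_start_xmit */
    icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
    icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);

    /* after: passes zeroed opts and accounts for NAT before sending */
    icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
    icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);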