Commit c46b7897 authored by Pravin Shelar, committed by David S. Miller

vxlan: simplify exception handling

vxlan egress path error handling has become complicated; it
needs to handle both the IPv4 and IPv6 tunnel cases.
An earlier patch removed vlan handling from vxlan_build_skb(), so
vxlan_build_skb() no longer needs to free the skb and we can simplify
the xmit path by having a single error-handling path for both types
of tunnel.
Signed-off-by: Pravin B Shelar <pshelar@ovn.org>
Acked-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 03dc52a8
...@@ -1755,11 +1755,11 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, ...@@ -1755,11 +1755,11 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
/* Need space for new headers (invalidates iph ptr) */ /* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom); err = skb_cow_head(skb, min_headroom);
if (unlikely(err)) if (unlikely(err))
goto out_free; return err;
err = iptunnel_handle_offloads(skb, type); err = iptunnel_handle_offloads(skb, type);
if (err) if (err)
goto out_free; return err;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI; vxh->vx_flags = VXLAN_HF_VNI;
...@@ -1783,16 +1783,12 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, ...@@ -1783,16 +1783,12 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
if (vxflags & VXLAN_F_GPE) { if (vxflags & VXLAN_F_GPE) {
err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol); err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
if (err < 0) if (err < 0)
goto out_free; return err;
inner_protocol = skb->protocol; inner_protocol = skb->protocol;
} }
skb_set_inner_protocol(skb, inner_protocol); skb_set_inner_protocol(skb, inner_protocol);
return 0; return 0;
out_free:
kfree_skb(skb);
return err;
} }
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
...@@ -1929,13 +1925,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -1929,13 +1925,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info; struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_dev *vxlan = netdev_priv(dev);
struct sock *sk; struct sock *sk;
struct rtable *rt = NULL;
const struct iphdr *old_iph; const struct iphdr *old_iph;
union vxlan_addr *dst; union vxlan_addr *dst;
union vxlan_addr remote_ip, local_ip; union vxlan_addr remote_ip, local_ip;
union vxlan_addr *src; union vxlan_addr *src;
struct vxlan_metadata _md; struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md; struct vxlan_metadata *md = &_md;
struct dst_entry *ndst = NULL;
__be16 src_port = 0, dst_port; __be16 src_port = 0, dst_port;
__be32 vni, label; __be32 vni, label;
__be16 df = 0; __be16 df = 0;
...@@ -2011,6 +2007,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2011,6 +2007,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (dst->sa.sa_family == AF_INET) { if (dst->sa.sa_family == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
if (!sock4) if (!sock4)
goto drop; goto drop;
...@@ -2032,7 +2029,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2032,7 +2029,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
netdev_dbg(dev, "circular route to %pI4\n", netdev_dbg(dev, "circular route to %pI4\n",
&dst->sin.sin_addr.s_addr); &dst->sin.sin_addr.s_addr);
dev->stats.collisions++; dev->stats.collisions++;
goto rt_tx_error; ip_rt_put(rt);
goto tx_error;
} }
/* Bypass encapsulation if the destination is local */ /* Bypass encapsulation if the destination is local */
...@@ -2055,12 +2053,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2055,12 +2053,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
df = htons(IP_DF); df = htons(IP_DF);
ndst = &rt->dst;
tos = ip_tunnel_ecn_encap(tos, old_iph, skb); tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum); vni, md, flags, udp_sum);
if (err < 0) if (err < 0)
goto xmit_tx_error; goto tx_error;
udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
dst->sin.sin_addr.s_addr, tos, ttl, df, dst->sin.sin_addr.s_addr, tos, ttl, df,
...@@ -2068,7 +2067,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2068,7 +2067,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
} else { } else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
u32 rt6i_flags; u32 rt6i_flags;
ndst = vxlan6_get_route(vxlan, sock6, skb, ndst = vxlan6_get_route(vxlan, sock6, skb,
...@@ -2080,13 +2078,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2080,13 +2078,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
netdev_dbg(dev, "no route to %pI6\n", netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr); &dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++; dev->stats.tx_carrier_errors++;
ndst = NULL;
goto tx_error; goto tx_error;
} }
if (ndst->dev == dev) { if (ndst->dev == dev) {
netdev_dbg(dev, "circular route to %pI6\n", netdev_dbg(dev, "circular route to %pI6\n",
&dst->sin6.sin6_addr); &dst->sin6.sin6_addr);
dst_release(ndst);
dev->stats.collisions++; dev->stats.collisions++;
goto tx_error; goto tx_error;
} }
...@@ -2098,12 +2096,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2098,12 +2096,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
!(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan; struct vxlan_dev *dst_vxlan;
dst_release(ndst);
dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_vxlan = vxlan_find_vni(vxlan->net, vni,
dst->sa.sa_family, dst_port, dst->sa.sa_family, dst_port,
vxlan->flags); vxlan->flags);
if (!dst_vxlan) if (!dst_vxlan)
goto tx_error; goto tx_error;
dst_release(ndst);
vxlan_encap_bypass(skb, vxlan, dst_vxlan); vxlan_encap_bypass(skb, vxlan, dst_vxlan);
return; return;
} }
...@@ -2116,11 +2114,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2116,11 +2114,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_scrub_packet(skb, xnet); skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
vni, md, flags, udp_sum); vni, md, flags, udp_sum);
if (err < 0) { if (err < 0)
dst_release(ndst); goto tx_error;
dev->stats.tx_errors++;
return;
}
udp_tunnel6_xmit_skb(ndst, sk, skb, dev, udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
&src->sin6.sin6_addr, &src->sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl, &dst->sin6.sin6_addr, tos, ttl,
...@@ -2132,17 +2128,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ...@@ -2132,17 +2128,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
drop: drop:
dev->stats.tx_dropped++; dev->stats.tx_dropped++;
goto tx_free; dev_kfree_skb(skb);
return;
xmit_tx_error:
/* skb is already freed. */
skb = NULL;
rt_tx_error:
ip_rt_put(rt);
tx_error: tx_error:
dst_release(ndst);
dev->stats.tx_errors++; dev->stats.tx_errors++;
tx_free: kfree_skb(skb);
dev_kfree_skb(skb);
} }
/* Transmit local packets over Vxlan /* Transmit local packets over Vxlan
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment