Commit cb34b7cf authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

ipv6/sit: use DEV_STATS_INC() to avoid data-races

syzbot/KCSAN reported that multiple cpus are updating dev->stats.tx_errors
concurrently.

This is because sit tunnels are NETIF_F_LLTX, meaning their ndo_start_xmit()
is not protected by a spinlock.

While original KCSAN report was about tx path, rx path has the same issue.
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6c1c5097
...@@ -694,7 +694,7 @@ static int ipip6_rcv(struct sk_buff *skb) ...@@ -694,7 +694,7 @@ static int ipip6_rcv(struct sk_buff *skb)
skb->dev = tunnel->dev; skb->dev = tunnel->dev;
if (packet_is_spoofed(skb, iph, tunnel)) { if (packet_is_spoofed(skb, iph, tunnel)) {
tunnel->dev->stats.rx_errors++; DEV_STATS_INC(tunnel->dev, rx_errors);
goto out; goto out;
} }
...@@ -714,8 +714,8 @@ static int ipip6_rcv(struct sk_buff *skb) ...@@ -714,8 +714,8 @@ static int ipip6_rcv(struct sk_buff *skb)
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&iph->saddr, iph->tos); &iph->saddr, iph->tos);
if (err > 1) { if (err > 1) {
++tunnel->dev->stats.rx_frame_errors; DEV_STATS_INC(tunnel->dev, rx_frame_errors);
++tunnel->dev->stats.rx_errors; DEV_STATS_INC(tunnel->dev, rx_errors);
goto out; goto out;
} }
} }
...@@ -942,7 +942,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -942,7 +942,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (!rt) { if (!rt) {
rt = ip_route_output_flow(tunnel->net, &fl4, NULL); rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error_icmp; goto tx_error_icmp;
} }
dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr); dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
...@@ -950,14 +950,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -950,14 +950,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
ip_rt_put(rt); ip_rt_put(rt);
dev->stats.tx_carrier_errors++; DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_error_icmp; goto tx_error_icmp;
} }
tdev = rt->dst.dev; tdev = rt->dst.dev;
if (tdev == dev) { if (tdev == dev) {
ip_rt_put(rt); ip_rt_put(rt);
dev->stats.collisions++; DEV_STATS_INC(dev, collisions);
goto tx_error; goto tx_error;
} }
...@@ -970,7 +970,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -970,7 +970,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
mtu = dst_mtu(&rt->dst) - t_hlen; mtu = dst_mtu(&rt->dst) - t_hlen;
if (mtu < IPV4_MIN_MTU) { if (mtu < IPV4_MIN_MTU) {
dev->stats.collisions++; DEV_STATS_INC(dev, collisions);
ip_rt_put(rt); ip_rt_put(rt);
goto tx_error; goto tx_error;
} }
...@@ -1009,7 +1009,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -1009,7 +1009,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) { if (!new_skb) {
ip_rt_put(rt); ip_rt_put(rt);
dev->stats.tx_dropped++; DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb); kfree_skb(skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -1039,7 +1039,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ...@@ -1039,7 +1039,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
dst_link_failure(skb); dst_link_failure(skb);
tx_error: tx_error:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -1058,7 +1058,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb, ...@@ -1058,7 +1058,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
tx_error: tx_error:
kfree_skb(skb); kfree_skb(skb);
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -1087,7 +1087,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb, ...@@ -1087,7 +1087,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
tx_err: tx_err:
dev->stats.tx_errors++; DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb); kfree_skb(skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment