Commit cf329aa4 authored by Paolo Abeni, committed by David S. Miller

udp: cope with UDP GRO packet misdirection

In some scenarios, the GRO engine can assemble a UDP GRO packet
that ultimately lands on a non GRO-enabled socket.
This patch addresses the issue by explicitly checking the UDP
socket's features before enqueuing the packet, and segmenting
the unexpected GRO packet, if needed.

We must also cope with re-insertion requests: after segmentation,
the UDP code resubmits such packets via the protocol delivery helper
introduced by the previous patches, as needed.

Segmentation is performed by a common helper, which takes care of
updating socket and protocol stats in case of failure.

rfc v3 -> v1
 - fix compile issues with rxrpc
 - when gso_segment returns NULL, treat it as an error
 - added 'ipv4' argument to udp_rcv_segment()

rfc v2 -> rfc v3
 - moved udp_rcv_segment() into net/udp.h, account errors to socket
   and ns, always return NULL or segs list
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 80bde363
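
For context before the diff: a socket becomes "GRO-enabled" by opting in with the UDP_GRO socket option, after which a single recvmsg() can return several datagrams coalesced into one buffer, with the segment size reported in a cmsg (filled by udp_cmsg_recv(), visible in the first hunk below). The following receiver sketch is not part of the patch; error handling is trimmed, the helper name recv_gro is illustrative, and the fallback define is an assumption for older userspace headers.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef UDP_GRO
#define UDP_GRO 104	/* assumed fallback for pre-5.0 userspace headers */
#endif

/* Receive on a GRO-enabled UDP socket. Sockets that never issue this
 * setsockopt() are the "non GRO-enabled" ones this patch worries about.
 * Returns the gso_size reported by the kernel (0 if not aggregated).
 */
static int recv_gro(int fd, char *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int one = 1, gso_size = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_UDP && cm->cmsg_type == UDP_GRO)
			memcpy(&gso_size, CMSG_DATA(cm), sizeof(gso_size));

	/* gso_size > 0: buf holds a train of datagrams of that size */
	return gso_size;
}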
...
@@ -132,6 +132,12 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
 	}
 }
 
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
 #define udp_portaddr_for_each_entry(__sk, list) \
 	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
...
@@ -417,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field)					\
-do {									\
-	if ((sk)->sk_family == AF_INET)					\
-		__UDP_INC_STATS(sock_net(sk), field, 0);		\
-	else								\
-		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
-} while (0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+				 sock_net(sk)->mib.udp_statistics) :	\
+	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
+				 sock_net(sk)->mib.udp_stats_in6);	\
+})
 #else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
+			 sock_net(sk)->mib.udp_statistics;		\
+})
 #endif
 
+#define __UDPX_INC_STATS(sk, field) \
+	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
 #ifdef CONFIG_PROC_FS
 struct udp_seq_afinfo {
 	sa_family_t		family;
...
@@ -461,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void);
 #endif
 
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+					      struct sk_buff *skb, bool ipv4)
+{
+	struct sk_buff *segs;
+
+	/* the GSO CB lays after the UDP one, no need to save and restore any
+	 * CB fragment
+	 */
+	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+	if (unlikely(IS_ERR_OR_NULL(segs))) {
+		int segs_nr = skb_shinfo(skb)->gso_segs;
+
+		atomic_add(segs_nr, &sk->sk_drops);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	consume_skb(skb);
+	return segs;
+}
+
 #endif	/* _UDP_H */
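
A note on the form of __UDPX_MIB above: the macro it replaces expanded to a do { } while (0) statement and so had no value, but __UDPX_MIB must evaluate to one (the mib structure that both __UDPX_INC_STATS and udp_rcv_segment() feed to the SNMP accounting macros). It therefore uses the GCC/Clang statement-expression extension, where a ({ ... }) block yields its last expression as the value of the whole construct. A toy standalone illustration, not taken from the kernel:

#include <stdio.h>

/* GNU C statement expression: a ({ ... }) block evaluates to its last
 * expression, letting a macro choose between two objects and still be
 * usable as a value.
 */
#define PICK_COUNTER(ipv4, v4_ctr, v6_ctr)	\
({						\
	(ipv4) ? &(v4_ctr) : &(v6_ctr);		\
})

int main(void)
{
	long v4_errors = 0, v6_errors = 0;

	(*PICK_COUNTER(1, v4_errors, v6_errors))++;	/* bumps v4_errors */
	printf("%ld %ld\n", v4_errors, v6_errors);	/* prints: 1 0 */
	return 0;
}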
...
@@ -1906,7 +1906,7 @@ EXPORT_SYMBOL(udp_encap_enable);
  * Note that in the success and error cases, the skb is assumed to
  * have either been requeued or freed.
  */
-static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
...
@@ -2009,6 +2009,27 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return -1;
 }
 
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udp_queue_rcv_one_skb(sk, skb);
+
+	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, true);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+		ret = udp_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+	}
+	return 0;
+}
+
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
...
@@ -554,7 +554,7 @@ void udpv6_encap_enable(void)
 }
 EXPORT_SYMBOL(udpv6_encap_enable);
 
-static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
...
@@ -637,6 +637,28 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return -1;
 }
 
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udpv6_queue_rcv_one_skb(sk, skb);
+
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, false);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+		ret = udpv6_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
+						 true);
+	}
+	return 0;
+}
+
 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 				   __be16 loc_port, const struct in6_addr *loc_addr,
 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
...
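
The receive side tested by udp_unexpected_gso() has a transmit-side twin: the UDP_SEGMENT socket option (the sibling of UDP_GRO) makes the stack build exactly the kind of SKB_GSO_UDP_L4 packet this patch segments, and over loopback or veth a locally generated GSO packet is one plausible way to reach a receiver that never enabled GRO. A sender sketch, not part of the patch; the helper name send_gso is illustrative and the fallback define is assumed for older userspace headers.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103	/* assumed fallback for pre-4.18 userspace headers */
#endif

/* Send one 8000-byte buffer as a single GSO packet that the stack will
 * carve into 1000-byte datagrams (gso_type SKB_GSO_UDP_L4).
 */
static ssize_t send_gso(int fd, const struct sockaddr_in *dst)
{
	char buf[8000];
	int gso_size = 1000;

	memset(buf, 'x', sizeof(buf));
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0)
		return -1;
	return sendto(fd, buf, sizeof(buf), 0,
		      (const struct sockaddr *)dst, sizeof(*dst));
}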