Commit cb80ef46 authored by Benjamin LaHaise, committed by David S. Miller

net/ipv6/udp: UDP encapsulation: move socket locking into udpv6_queue_rcv_skb()

In order to make sure that when the encap_rcv() hook is introduced it is
not called with the socket lock held, move socket locking from callers into
udpv6_queue_rcv_skb(), matching what happens in IPv4.
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f7ad74fe
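
With the locking moved inside udpv6_queue_rcv_skb(), a later patch can run the tunnel demultiplex hook before bh_lock_sock() is taken. As a rough sketch only (the hook does not exist on the IPv6 side at this commit; the encap_type/encap_rcv fields are modeled on IPv4's struct udp_sock, and their IPv6 wiring here is an assumption):

/* Hypothetical sketch, not part of this commit: where an encap_rcv()
 * hook could slot in once introduced for IPv6.  Because this function
 * now takes the socket lock itself, the hook runs without it held.
 */
static int udpv6_queue_rcv_skb_sketch(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->encap_type && up->encap_rcv != NULL) {
		/* Tunnel hook, called with no socket lock held. */
		int ret = up->encap_rcv(sk, skb);

		if (ret <= 0)
			return -ret;	/* packet consumed by the tunnel */
		/* ret > 0: fall through, it is an ordinary UDP packet */
	}

	/* ... the sk_rcvqueues_full()/bh_lock_sock() queueing path
	 * added by this patch follows here ...
	 */
	return 0;
}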
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -558,14 +558,25 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 	}
 
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+		goto drop;
+
 	skb_dst_drop(skb);
 
-	rc = __udpv6_queue_rcv_skb(sk, skb);
+	bh_lock_sock(sk);
+	rc = 0;
+	if (!sock_owned_by_user(sk))
+		rc = __udpv6_queue_rcv_skb(sk, skb);
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+		bh_unlock_sock(sk);
+		goto drop;
+	}
+	bh_unlock_sock(sk);
 
 	return rc;
 drop:
+	atomic_inc(&sk->sk_drops);
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
 }
@@ -614,37 +625,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 static void flush_stack(struct sock **stack, unsigned int count,
 			struct sk_buff *skb, unsigned int final)
 {
-	unsigned int i;
+	struct sk_buff *skb1 = NULL;
 	struct sock *sk;
-	struct sk_buff *skb1;
+	unsigned int i;
 
 	for (i = 0; i < count; i++) {
-		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
-		sk = stack[i];
-		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
-				kfree_skb(skb1);
-				goto drop;
-			}
-			bh_lock_sock(sk);
-			if (!sock_owned_by_user(sk))
-				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
-				kfree_skb(skb1);
-				bh_unlock_sock(sk);
-				goto drop;
-			}
-			bh_unlock_sock(sk);
-			continue;
+		sk = stack[i];
+		if (likely(skb1 == NULL))
+			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+		if (!skb1) {
+			atomic_inc(&sk->sk_drops);
+			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					  IS_UDPLITE(sk));
+			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+					  IS_UDPLITE(sk));
 		}
-drop:
-		atomic_inc(&sk->sk_drops);
-		UDP6_INC_STATS_BH(sock_net(sk),
-				  UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-		UDP6_INC_STATS_BH(sock_net(sk),
-				  UDP_MIB_INERRORS, IS_UDPLITE(sk));
+
+		if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
+			skb1 = NULL;
 	}
+	if (unlikely(skb1))
+		kfree_skb(skb1);
 }
 /*
  * Note: called only from the BH handler context,
@@ -784,40 +785,30 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	 * for sock caches... i'll skip this for now.
 	 */
 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+	if (sk != NULL) {
+		int ret = udpv6_queue_rcv_skb(sk, skb);
+		sock_put(sk);
 
-	if (sk == NULL) {
-		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
-			goto discard;
+		/* a return value > 0 means to resubmit the input, but
+		 * it wants the return to be -protocol, or 0
+		 */
+		if (ret > 0)
+			return -ret;
 
-		if (udp_lib_checksum_complete(skb))
-			goto discard;
-		UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
-				proto == IPPROTO_UDPLITE);
-
-		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
-
-		kfree_skb(skb);
 		return 0;
 	}
 
-	/* deliver */
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
-		sock_put(sk);
-		goto discard;
-	}
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
-		atomic_inc(&sk->sk_drops);
-		bh_unlock_sock(sk);
-		sock_put(sk);
-		goto discard;
-	}
-	bh_unlock_sock(sk);
-	sock_put(sk);
-	return 0;
+	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+		goto discard;
+
+	if (udp_lib_checksum_complete(skb))
+		goto discard;
+
+	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+	kfree_skb(skb);
+	return 0;
 
 short_packet:
 	LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",