Commit c4d2444e authored by Sridhar Samudrala

[SCTP]: Fix a couple of races between sctp_peeloff() and sctp_rcv().

Validate and update the sk in sctp_rcv() to avoid the race where an
assoc/ep could move to a different socket after we get the sk, but before
the skb is added to the backlog.

Also migrate the skbs in the backlog queue to the new sk when doing a peeloff.
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
parent 313e7b4d
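The race window here is between looking a socket up and locking it: a concurrent sctp_peeloff() can move the association to a new socket in that gap. Below is a minimal, hypothetical sketch of the lock-swap revalidation pattern the sctp_rcv() hunk applies, using plain pthread mutexes and stand-in types rather than the kernel's sctp_bh_lock_sock() machinery:

/* Hypothetical sketch: revalidate the socket after locking it, and swap
 * locks if a concurrent "peeloff" has repointed the receiver.  Simplified
 * stand-in types, not the kernel API.
 */
#include <pthread.h>
#include <stdio.h>

struct sock {
        pthread_mutex_t lock;
        const char *name;
};

struct rcvr {
        struct sock *sk;        /* may be repointed by a concurrent peeloff */
};

static struct sock oldsk = { PTHREAD_MUTEX_INITIALIZER, "oldsk" };
static struct sock newsk = { PTHREAD_MUTEX_INITIALIZER, "newsk" };

static void deliver(struct rcvr *r, struct sock *sk)
{
        pthread_mutex_lock(&sk->lock);

        /* The receiver may have moved to a different socket between the
         * lookup and the lock acquisition.  Take the new lock before
         * dropping the old one, then queue on the current owner.
         */
        if (sk != r->sk) {
                pthread_mutex_lock(&r->sk->lock);
                pthread_mutex_unlock(&sk->lock);
                sk = r->sk;
        }

        printf("queueing on %s\n", sk->name);
        pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
        struct rcvr r = { &oldsk };

        deliver(&r, r.sk);      /* no peeloff: stays on oldsk */
        r.sk = &newsk;          /* simulate a peeloff moving the receiver */
        deliver(&r, &oldsk);    /* stale sk: revalidated to newsk */
        return 0;
}

Taking the new lock before releasing the old one means there is never a moment when the packet is queued without holding the lock of the socket that currently owns the receiver.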
@@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
 void sctp_icmp_proto_unreachable(struct sock *sk,
                                  struct sctp_association *asoc,
                                  struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+                          struct sock *oldsk, struct sock *newsk);
 
 /*
  * Section:  Macros, externs, and inlines
@@ -257,6 +257,15 @@ int sctp_rcv(struct sk_buff *skb)
          */
         sctp_bh_lock_sock(sk);
 
+        /* It is possible that the association could have moved to a different
+         * socket if it is peeled off. If so, update the sk.
+         */
+        if (sk != rcvr->sk) {
+                sctp_bh_lock_sock(rcvr->sk);
+                sctp_bh_unlock_sock(sk);
+                sk = rcvr->sk;
+        }
+
         if (sock_owned_by_user(sk))
                 sk_add_backlog(sk, skb);
         else
@@ -297,6 +306,9 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
         struct sctp_ep_common *rcvr = NULL;
 
         rcvr = chunk->rcvr;
+
+        BUG_TRAP(rcvr->sk == sk);
+
         if (rcvr->dead) {
                 sctp_chunk_free(chunk);
         } else {
@@ -313,6 +325,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
         return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc,
+                          struct sock *oldsk, struct sock *newsk)
+{
+        struct sk_buff *skb;
+        struct sctp_chunk *chunk;
+
+        skb = oldsk->sk_backlog.head;
+        oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+        while (skb != NULL) {
+                struct sk_buff *next = skb->next;
+
+                chunk = SCTP_INPUT_CB(skb)->chunk;
+                skb->next = NULL;
+                if (&assoc->base == chunk->rcvr)
+                        sk_add_backlog(newsk, skb);
+                else
+                        sk_add_backlog(oldsk, skb);
+                skb = next;
+        }
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                            struct sctp_transport *t, __u32 pmtu)
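sctp_backlog_migrate() above repartitions the backlog in a single pass: detach the entire singly linked list up front, then requeue each skb onto whichever socket now owns its receiver. A standalone, hypothetical sketch of the same detach-and-repartition pattern with simplified types (not sk_buff or sk_add_backlog()):

/* Hypothetical sketch: detach a whole singly linked queue, then walk it
 * once, requeueing each node onto the queue that now owns it.
 */
#include <stdio.h>

struct node {
        int owner;              /* stand-in for chunk->rcvr */
        struct node *next;
};

struct queue {
        struct node *head, *tail;
};

static void enqueue(struct queue *q, struct node *n)
{
        n->next = NULL;
        if (q->tail)
                q->tail->next = n;
        else
                q->head = n;
        q->tail = n;
}

/* Move every node owned by `owner` from oldq to newq, keeping order. */
static void migrate(struct queue *oldq, struct queue *newq, int owner)
{
        struct node *n = oldq->head;

        oldq->head = oldq->tail = NULL; /* detach the whole list first */
        while (n) {
                struct node *next = n->next;

                enqueue(n->owner == owner ? newq : oldq, n);
                n = next;
        }
}

int main(void)
{
        struct node a = { 1 }, b = { 2 }, c = { 1 };
        struct queue oldq = { 0 }, newq = { 0 };

        enqueue(&oldq, &a);
        enqueue(&oldq, &b);
        enqueue(&oldq, &c);
        migrate(&oldq, &newq, 1);       /* a and c move, b stays */

        for (struct node *n = newq.head; n; n = n->next)
                printf("newq: owner %d\n", n->owner);
        for (struct node *n = oldq.head; n; n = n->next)
                printf("oldq: owner %d\n", n->owner);
        return 0;
}

Emptying head and tail before the walk is what makes re-adding to the old queue safe: nodes that stay behind are appended to a fresh list in their original order and are never visited twice. In the patch itself, the caller wraps the migration in spin_lock_bh(&oldsk->sk_lock.slock) (see the sctp_sock_migrate() hunk below), so bottom halves cannot append new skbs mid-walk.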
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
          */
         newsp->type = type;
 
+        spin_lock_bh(&oldsk->sk_lock.slock);
+        /* Migrate the backlog from oldsk to newsk. */
+        sctp_backlog_migrate(assoc, oldsk, newsk);
         /* Migrate the association to the new socket. */
         sctp_assoc_migrate(assoc, newsk);
+        spin_unlock_bh(&oldsk->sk_lock.slock);
 
         /* If the association on the newsk is already closed before accept()
          * is called, set RCV_SHUTDOWN flag.