Commit fc3b43ea authored by David S. Miller

Remove the bogus tw->tb NULL check in tcp_timewait_kill. This is what made
the following bug harder to find:

Put new timewait buckets into the bind hash _FIRST_, before they appear in
the established hash, to kill some races with socket creation/lookup.

parent 5a7c2000
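The ordering matters because a lookup that finds the new timewait bucket through the established hash may immediately follow tw->tb into the bind hash. Below is a minimal, single-threaded userspace sketch of the publication order this change enforces; the names are illustrative only, it is not kernel code and does not model the actual locking.

#include <assert.h>

struct tw_entry {
        int bound;                      /* stands in for "linked into the bind hash, tw->tb valid" */
};

static struct tw_entry *established_hash;      /* one-slot toy "established hash" */

/* New order: make the entry bound first, only then publish it where
 * lookups can find it. */
static void hashdance_sketch(struct tw_entry *tw)
{
        tw->bound = 1;                  /* Step 1: bind hash */
        established_hash = tw;          /* Steps 2/3: established hash */
}

/* What a lookup may now assume: anything visible in the established
 * hash already has its bind-hash state set up. */
static void lookup_check(void)
{
        if (established_hash)
                assert(established_hash->bound);
}

int main(void)
{
        struct tw_entry tw = { 0 };

        hashdance_sketch(&tw);
        lookup_check();
        return 0;
}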
@@ -75,17 +75,16 @@ void tcp_timewait_kill(struct tcp_tw_bucket *tw)
         /* Disassociate with bind bucket. */
         bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
         spin_lock(&bhead->lock);
-        if ((tb = tw->tb) != NULL) {
-                if(tw->bind_next)
-                        tw->bind_next->bind_pprev = tw->bind_pprev;
-                *(tw->bind_pprev) = tw->bind_next;
-                tw->tb = NULL;
-                if (tb->owners == NULL) {
-                        if (tb->next)
-                                tb->next->pprev = tb->pprev;
-                        *(tb->pprev) = tb->next;
-                        kmem_cache_free(tcp_bucket_cachep, tb);
-                }
+        tb = tw->tb;
+        if(tw->bind_next)
+                tw->bind_next->bind_pprev = tw->bind_pprev;
+        *(tw->bind_pprev) = tw->bind_next;
+        tw->tb = NULL;
+        if (tb->owners == NULL) {
+                if (tb->next)
+                        tb->next->pprev = tb->pprev;
+                *(tb->pprev) = tb->next;
+                kmem_cache_free(tcp_bucket_cachep, tb);
         }
         spin_unlock(&bhead->lock);
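For reference, the unlink above and the insertion added in the hashdance below both use the pprev idiom: each entry stores the address of the pointer that points at it, so no head special case is needed, and after this patch no NULL check either, since every timewait socket is guaranteed to be bound. Below is a small self-contained userspace sketch of that idiom, with simplified names rather than the real tcp_tw_bucket fields.

#include <assert.h>
#include <stddef.h>

struct node {
        struct node *next;
        struct node **pprev;            /* address of the pointer that points at us */
};

/* Insert at the head: same shape as the Step 1 code that links tw into
 * tb->owners via bind_next/bind_pprev. */
static void node_add(struct node **head, struct node *n)
{
        if ((n->next = *head) != NULL)
                n->next->pprev = &n->next;
        *head = n;
        n->pprev = head;
}

/* Unlink without knowing the head: same shape as the
 * tw->bind_next / tw->bind_pprev handling in tcp_timewait_kill. */
static void node_unlink(struct node *n)
{
        if (n->next)
                n->next->pprev = n->pprev;
        *(n->pprev) = n->next;
}

int main(void)
{
        struct node *head = NULL;
        struct node a = { 0 }, b = { 0 };

        node_add(&head, &a);
        node_add(&head, &b);            /* list: b, a */
        node_unlink(&a);                /* list: b */
        assert(head == &b && b.next == NULL);
        node_unlink(&b);                /* list: empty */
        assert(head == NULL);
        return 0;
}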
@@ -304,9 +303,23 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
         struct tcp_bind_hashbucket *bhead;
         struct sock **head, *sktw;

+        /* Step 1: Put TW into bind hash. Original socket stays there too.
+           Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
+           binding cache, even if it is closed.
+         */
+        bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
+        spin_lock(&bhead->lock);
+        tw->tb = (struct tcp_bind_bucket *)sk->prev;
+        BUG_TRAP(sk->prev!=NULL);
+        if ((tw->bind_next = tw->tb->owners) != NULL)
+                tw->tb->owners->bind_pprev = &tw->bind_next;
+        tw->tb->owners = (struct sock*)tw;
+        tw->bind_pprev = &tw->tb->owners;
+        spin_unlock(&bhead->lock);
+
         write_lock(&ehead->lock);

-        /* Step 1: Remove SK from established hash. */
+        /* Step 2: Remove SK from established hash. */
         if (sk->pprev) {
                 if(sk->next)
                         sk->next->pprev = sk->pprev;
@@ -315,7 +328,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
                 sock_prot_dec_use(sk->prot);
         }

-        /* Step 2: Hash TW into TIMEWAIT half of established hash table. */
+        /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
         head = &(ehead + tcp_ehash_size)->chain;
         sktw = (struct sock *)tw;
         if((sktw->next = *head) != NULL)
@@ -325,20 +338,6 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
         atomic_inc(&tw->refcnt);
         write_unlock(&ehead->lock);
-
-        /* Step 3: Put TW into bind hash. Original socket stays there too.
-           Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
-           binding cache, even if it is closed.
-         */
-        bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
-        spin_lock(&bhead->lock);
-        tw->tb = (struct tcp_bind_bucket *)sk->prev;
-        BUG_TRAP(sk->prev!=NULL);
-        if ((tw->bind_next = tw->tb->owners) != NULL)
-                tw->tb->owners->bind_pprev = &tw->bind_next;
-        tw->tb->owners = (struct sock*)tw;
-        tw->bind_pprev = &tw->tb->owners;
-        spin_unlock(&bhead->lock);
 }

 /*
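One more point worth noting from the hunks above: the new Step 1 runs entirely under the bind-hash chain lock and releases it before the established-hash chain lock is taken, so the two locks are never held together. Here is a hedged userspace sketch of that lock scoping, using pthread mutexes as stand-ins; the helper and messages are illustrative, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bind_chain_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ehash_chain_lock = PTHREAD_MUTEX_INITIALIZER;

static void tw_hashdance_sketch(void)
{
        /* Step 1: link the timewait entry into the bind-hash chain. */
        pthread_mutex_lock(&bind_chain_lock);
        puts("step 1: insert tw into bind hash (tw->tb now valid)");
        pthread_mutex_unlock(&bind_chain_lock);

        /* Steps 2 and 3: swap sk for tw in the established hash,
         * done under the established-hash chain lock only. */
        pthread_mutex_lock(&ehash_chain_lock);
        puts("step 2: unhash sk from the established hash");
        puts("step 3: hash tw into the timewait half of the established hash");
        pthread_mutex_unlock(&ehash_chain_lock);
}

int main(void)
{
        tw_hashdance_sketch();
        return 0;
}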