Commit b03faa1f authored by Ursula Braun, committed by David S. Miller

net/smc: postpone release of clcsock

According to RFC7609 (http://www.rfc-editor.org/info/rfc7609)
first the SMC-R connection is shut down and then the normal TCP
connection FIN processing drives cleanup of the internal TCP connection.
The unconditional release of the clcsock during active socket closing
has to be postponed if the peer has not yet signalled socket closing.
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 41c80be2
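
In short, the patch changes smc_release() so that the clcsock is no longer released unconditionally: the internal TCP socket is only torn down once the SMC socket has reached SMC_CLOSED, and otherwise the release is left to the passive-close worker (second hunk below). A minimal user-space sketch of that rule follows; the trimmed state enum and the helper may_release_clcsock() are illustrative only and not part of the kernel patch.

/* Illustrative model of the reworked release rule, not kernel code.
 * State names mirror net/smc, but the enum values and the helper are
 * invented for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

enum smc_state { SMC_INIT, SMC_ACTIVE, SMC_LISTEN, SMC_PEERCLOSEWAIT1, SMC_CLOSED };

/* hypothetical helper: may smc_release() drop the clcsock right now? */
static bool may_release_clcsock(enum smc_state state)
{
	/* only a fully closed SMC socket releases its clcsock; in every
	 * other state the release is postponed until the peer's closing
	 * has been processed and the state reaches SMC_CLOSED
	 */
	return state == SMC_CLOSED;
}

int main(void)
{
	enum smc_state states[] = { SMC_ACTIVE, SMC_PEERCLOSEWAIT1, SMC_CLOSED };
	const char *names[]     = { "SMC_ACTIVE", "SMC_PEERCLOSEWAIT1", "SMC_CLOSED" };

	for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++)
		printf("%-18s -> release clcsock now: %s\n", names[i],
		       may_release_clcsock(states[i]) ? "yes" : "postponed");
	return 0;
}
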
@@ -145,32 +145,33 @@ static int smc_release(struct socket *sock)
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
-	}
-
-	sk->sk_prot->unhash(sk);
-
-	if (smc->clcsock) {
-		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+	} else {
+		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+			sock_put(sk); /* passive closing */
+		if (sk->sk_state == SMC_LISTEN) {
 			/* wake up clcsock accept */
 			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
 		}
-		mutex_lock(&smc->clcsock_release_lock);
-		sock_release(smc->clcsock);
-		smc->clcsock = NULL;
-		mutex_unlock(&smc->clcsock_release_lock);
-	}
-	if (smc->use_fallback) {
-		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
-			sock_put(sk); /* passive closing */
 		sk->sk_state = SMC_CLOSED;
 		sk->sk_state_change(sk);
 	}
 
+	sk->sk_prot->unhash(sk);
+
+	if (sk->sk_state == SMC_CLOSED) {
+		if (smc->clcsock) {
+			mutex_lock(&smc->clcsock_release_lock);
+			sock_release(smc->clcsock);
+			smc->clcsock = NULL;
+			mutex_unlock(&smc->clcsock_release_lock);
+		}
+		if (!smc->use_fallback)
+			smc_conn_free(&smc->conn);
+	}
+
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
-		smc_conn_free(&smc->conn);
 	release_sock(sk);
 
 	sock_put(sk); /* final sock_put */
@@ -405,8 +405,13 @@ static void smc_close_passive_work(struct work_struct *work)
 	if (old_state != sk->sk_state) {
 		sk->sk_state_change(sk);
 		if ((sk->sk_state == SMC_CLOSED) &&
-		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
+		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
 			smc_conn_free(conn);
+			if (smc->clcsock) {
+				sock_release(smc->clcsock);
+				smc->clcsock = NULL;
+			}
+		}
 	}
 	release_sock(sk);
 	sock_put(sk); /* sock_hold done by schedulers of close_work */