Commit a60a2b1e authored by Ursula Braun, committed by David S. Miller

net/smc: reduce active tcp_listen workers

SMC starts a separate tcp_listen worker for every SMC socket in state
SMC_LISTEN, and an incoming connection request can be accepted only if
this worker is actually running and waiting in kernel_accept(). But the
number of concurrently running workers is limited.
This patch reworks the listening SMC code so that a tcp_listen worker
is started only after the SYN-ACK handshake on the internal clc-socket
has completed.
Suggested-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Reviewed-by: Guvenc Gulce <guvenc@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b55353e2
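The rework visible in the diff below hinges on a callback-chaining pattern: smc_listen() saves the clc-socket's original sk_data_ready function, installs an SMC-specific wrapper, and the wrapper calls the saved function and then schedules the tcp_listen worker only while the socket is in SMC_LISTEN state. What follows is a minimal userspace sketch of that pattern, not kernel code: fake_sock, fake_listener and tcp_listen_worker are illustrative stand-ins, and where the kernel would call schedule_work() the sketch simply invokes the worker directly.

#include <stdio.h>

/* Stand-in for the bits of struct sock the pattern needs. */
struct fake_sock {
	void (*data_ready)(struct fake_sock *sk);  /* currently installed callback */
	void *user_data;                           /* back-pointer to the owner */
};

/* Stand-in for the listening smc_sock. */
struct fake_listener {
	struct fake_sock clcsock;                       /* internal "tcp" socket */
	void (*saved_data_ready)(struct fake_sock *sk); /* original callback */
	int listening;                                  /* SMC_LISTEN equivalent */
};

/* Original TCP behaviour: just wake up whoever waits for data. */
static void tcp_data_ready(struct fake_sock *sk)
{
	(void)sk;
	printf("original data_ready: wake blocked readers\n");
}

/* The worker drains the accept queue; the kernel version uses
 * non-blocking kernel_accept() calls and stops on -EAGAIN.
 */
static void tcp_listen_worker(struct fake_listener *lsmc)
{
	(void)lsmc;
	printf("worker: accept queued connections, stop when queue is empty\n");
}

/* Wrapper installed while listening: chain to the original callback,
 * then kick the worker (the kernel schedules a work item instead).
 */
static void smc_style_data_ready(struct fake_sock *sk)
{
	struct fake_listener *lsmc = sk->user_data;

	lsmc->saved_data_ready(sk);
	if (lsmc->listening)
		tcp_listen_worker(lsmc);
}

int main(void)
{
	struct fake_listener lsmc = { .listening = 1 };

	lsmc.clcsock.data_ready = tcp_data_ready;

	/* What smc_listen() does before kernel_listen(): save the original
	 * callback, install the wrapper, and store a back-pointer.
	 */
	lsmc.saved_data_ready = lsmc.clcsock.data_ready;
	lsmc.clcsock.data_ready = smc_style_data_ready;
	lsmc.clcsock.user_data = &lsmc;

	/* Simulate TCP reporting a completed handshake on the clc-socket. */
	lsmc.clcsock.data_ready(&lsmc.clcsock);
	return 0;
}

The net effect is that no worker sits parked in a blocking kernel_accept(); one is scheduled only when TCP actually reports activity, and the first hunk below switches smc_clcsock_accept() to SOCK_NONBLOCK accepts that return -EAGAIN once the queue is drained.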
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -940,10 +940,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 
 	mutex_lock(&lsmc->clcsock_release_lock);
 	if (lsmc->clcsock)
-		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
 	mutex_unlock(&lsmc->clcsock_release_lock);
 	lock_sock(lsk);
-	if (rc < 0)
+	if (rc < 0 && rc != -EAGAIN)
 		lsk->sk_err = -rc;
 	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
 		new_sk->sk_prot->unhash(new_sk);
@@ -956,6 +956,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 		goto out;
 	}
 
+	/* new clcsock has inherited the smc listen-specific sk_data_ready
+	 * function; switch it back to the original sk_data_ready function
+	 */
+	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
 	(*new_smc)->clcsock = new_clcsock;
 out:
 	return rc;
@@ -1406,7 +1410,7 @@ static void smc_tcp_listen_work(struct work_struct *work)
 	lock_sock(lsk);
 	while (lsk->sk_state == SMC_LISTEN) {
 		rc = smc_clcsock_accept(lsmc, &new_smc);
-		if (rc)
+		if (rc) /* clcsock accept queue empty or error */
 			goto out;
 		if (!new_smc)
 			continue;
@@ -1426,7 +1430,23 @@ static void smc_tcp_listen_work(struct work_struct *work)
 
 out:
 	release_sock(lsk);
-	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
+	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
+}
+
+static void smc_clcsock_data_ready(struct sock *listen_clcsock)
+{
+	struct smc_sock *lsmc;
+
+	lsmc = (struct smc_sock *)
+	       ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
+	if (!lsmc)
+		return;
+	lsmc->clcsk_data_ready(listen_clcsock);
+	if (lsmc->sk.sk_state == SMC_LISTEN) {
+		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
+		if (!schedule_work(&lsmc->tcp_listen_work))
+			sock_put(&lsmc->sk);
+	}
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -1455,15 +1475,19 @@ static int smc_listen(struct socket *sock, int backlog)
 	if (!smc->use_fallback)
 		tcp_sk(smc->clcsock->sk)->syn_smc = 1;
 
+	/* save original sk_data_ready function and establish
+	 * smc-specific sk_data_ready function
+	 */
+	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
+	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
+	smc->clcsock->sk->sk_user_data =
+		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
 	rc = kernel_listen(smc->clcsock, backlog);
 	if (rc)
 		goto out;
 	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
-	sock_hold(sk); /* sock_hold in tcp_listen_worker */
-	if (!schedule_work(&smc->tcp_listen_work))
-		sock_put(sk);
 
 out:
 	release_sock(sk);
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -201,6 +201,8 @@ struct smc_connection {
 struct smc_sock {				/* smc sock container */
 	struct sock		sk;
 	struct socket		*clcsock;	/* internal tcp socket */
+	void			(*clcsk_data_ready)(struct sock *sk);
+						/* original data_ready fct. **/
 	struct smc_connection	conn;		/* smc connection */
 	struct smc_sock		*listen_smc;	/* listen parent */
 	struct work_struct	connect_work;	/* handle non-blocking connect*/
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -208,12 +208,11 @@ int smc_close_active(struct smc_sock *smc)
 		break;
 	case SMC_LISTEN:
 		sk->sk_state = SMC_CLOSED;
+		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+		smc->clcsock->sk->sk_user_data = NULL;
 		sk->sk_state_change(sk); /* wake up accept */
-		if (smc->clcsock && smc->clcsock->sk) {
+		if (smc->clcsock && smc->clcsock->sk)
 			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
-			/* wake up kernel_accept of smc_tcp_listen_worker */
-			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
-		}
 		smc_close_cleanup_listen(sk);
 		release_sock(sk);
 		flush_work(&smc->tcp_listen_work);