Commit 48b6190a authored by D. Wythe, committed by David S. Miller

net/smc: Limit SMC visits when handshake workqueue congested

This patch provides a mechanism to limit the number of connections that
attempt SMC, according to the pressure on the SMC handshake process.
At present, a high rate of incoming connections causes them to back up
in the SMC handshake queue, which raises connection establishment time.
That is quite unacceptable for applications that rely on short-lived
connections.

There are two ways to implement this mechanism:

1. Apply the limit after the TCP connection is established.
2. Apply the limit before the TCP connection is established.

With the first approach, we have to wait for and receive the CLC
messages that the client may send, and then actively reply with a
decline message. In a sense this is itself a form of SMC handshake, so
it still adds to connection establishment time.

With the second approach, the only issue is that we need to inject SMC
logic into TCP at the point where it is about to reply to the incoming
SYN. Since we already do that, this is no longer a problem. The
advantage is obvious: very little additional processing is required to
enforce the limit.

This patch uses the second approach. After this patch, connections that
exceed the limit are not given any SMC indication, and SMC is not
involved in any of their subsequent processing.

Link: https://lore.kernel.org/all/1641301961-59331-1-git-send-email-alibuda@linux.alibaba.com/
Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8270d9c2
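Before the diff itself, here is a minimal userspace sketch of the decision the
patch adds (an illustration only, not kernel code: the listener struct, the
queue-depth threshold, and the helper names are made up). It shows the pattern
used below: an optional, NULL-checked congestion callback on the listening
socket, consulted while the SYN is being answered, that silently downgrades the
connection to plain TCP when the handshake worker is overloaded.

/*
 * Sketch of the "optional congestion predicate" pattern.
 * All names and the threshold here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct listener {
	/* May be NULL, like tcp_sock's smc_hs_congested in the patch. */
	bool (*hs_congested)(const struct listener *lsk);
	int queued_handshakes;	/* stand-in for the handshake workqueue depth */
};

static bool hs_congested(const struct listener *lsk)
{
	/* Arbitrary example threshold; the real code asks the workqueue. */
	return lsk->queued_handshakes > 64;
}

/* Mirrors the tcp_openreq_init() change: advertise SMC only when the
 * client asked for it and the listener is not congested. */
static bool offer_smc(const struct listener *lsk, bool peer_requested_smc)
{
	return peer_requested_smc &&
	       !(lsk->hs_congested && lsk->hs_congested(lsk));
}

int main(void)
{
	struct listener idle = { .hs_congested = hs_congested, .queued_handshakes = 3 };
	struct listener busy = { .hs_congested = hs_congested, .queued_handshakes = 200 };

	printf("idle listener offers SMC: %d\n", offer_smc(&idle, true));	/* 1 */
	printf("busy listener offers SMC: %d\n", offer_smc(&busy, true));	/* 0 */
	return 0;
}

Leaving the callback NULL keeps the path untouched for listeners that never
enable SMC; in the patch, only SMC's smc_listen() installs it.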
@@ -394,6 +394,7 @@ struct tcp_sock {
 	bool	is_mptcp;
 #endif
 #if IS_ENABLED(CONFIG_SMC)
+	bool	(*smc_hs_congested)(const struct sock *sk);
 	bool	syn_smc;	/* SYN includes SMC */
 #endif
...
@@ -6703,7 +6703,8 @@ static void tcp_openreq_init(struct request_sock *req,
 	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 	ireq->ir_mark = inet_request_mark(sk, skb);
 #if IS_ENABLED(CONFIG_SMC)
-	ireq->smc_ok = rx_opt->smc_ok;
+	ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested &&
+					   tcp_sk(sk)->smc_hs_congested(sk));
 #endif
 }
...
@@ -103,6 +103,21 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
 	return NULL;
 }
 
+static bool smc_hs_congested(const struct sock *sk)
+{
+	const struct smc_sock *smc;
+
+	smc = smc_clcsock_user_data(sk);
+
+	if (!smc)
+		return true;
+
+	if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
+		return true;
+
+	return false;
+}
+
 static struct smc_hashinfo smc_v4_hashinfo = {
 	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
 };
@@ -2311,6 +2326,8 @@ static int smc_listen(struct socket *sock, int backlog)
 	inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
 
+	tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
+
 	rc = kernel_listen(smc->clcsock, backlog);
 	if (rc) {
 		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
...