Commit 75e908c3 authored by Paolo Abeni's avatar Paolo Abeni Committed by David S. Miller

mptcp: use fast lock for subflows when possible

There are a number of call sites where the ssk socket
lock is acquired using the full-blown version even though they are
eligible for the fast variant. Let's move them to the latter.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8ce568ed
...@@ -540,6 +540,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) ...@@ -540,6 +540,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node); subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
if (subflow) { if (subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow;
spin_unlock_bh(&msk->pm.lock); spin_unlock_bh(&msk->pm.lock);
pr_debug("send ack for %s%s%s", pr_debug("send ack for %s%s%s",
...@@ -547,9 +548,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) ...@@ -547,9 +548,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "", mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
mptcp_pm_should_add_signal_port(msk) ? " [port]" : ""); mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
lock_sock(ssk); slow = lock_sock_fast(ssk);
tcp_send_ack(ssk); tcp_send_ack(ssk);
release_sock(ssk); unlock_sock_fast(ssk, slow);
spin_lock_bh(&msk->pm.lock); spin_lock_bh(&msk->pm.lock);
} }
} }
...@@ -566,6 +567,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, ...@@ -566,6 +567,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
struct sock *sk = (struct sock *)msk; struct sock *sk = (struct sock *)msk;
struct mptcp_addr_info local; struct mptcp_addr_info local;
bool slow;
local_address((struct sock_common *)ssk, &local); local_address((struct sock_common *)ssk, &local);
if (!addresses_equal(&local, addr, addr->port)) if (!addresses_equal(&local, addr, addr->port))
...@@ -578,9 +580,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, ...@@ -578,9 +580,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
spin_unlock_bh(&msk->pm.lock); spin_unlock_bh(&msk->pm.lock);
pr_debug("send ack for mp_prio"); pr_debug("send ack for mp_prio");
lock_sock(ssk); slow = lock_sock_fast(ssk);
tcp_send_ack(ssk); tcp_send_ack(ssk);
release_sock(ssk); unlock_sock_fast(ssk, slow);
spin_lock_bh(&msk->pm.lock); spin_lock_bh(&msk->pm.lock);
return 0; return 0;
......
...@@ -433,23 +433,25 @@ static void mptcp_send_ack(struct mptcp_sock *msk) ...@@ -433,23 +433,25 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_for_each_subflow(msk, subflow) { mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow;
lock_sock(ssk); slow = lock_sock_fast(ssk);
if (tcp_can_send_ack(ssk)) if (tcp_can_send_ack(ssk))
tcp_send_ack(ssk); tcp_send_ack(ssk);
release_sock(ssk); unlock_sock_fast(ssk, slow);
} }
} }
static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk) static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
{ {
bool slow;
int ret; int ret;
lock_sock(ssk); slow = lock_sock_fast(ssk);
ret = tcp_can_send_ack(ssk); ret = tcp_can_send_ack(ssk);
if (ret) if (ret)
tcp_cleanup_rbuf(ssk, 1); tcp_cleanup_rbuf(ssk, 1);
release_sock(ssk); unlock_sock_fast(ssk, slow);
return ret; return ret;
} }
...@@ -2252,13 +2254,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk) ...@@ -2252,13 +2254,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
bool slow;
lock_sock(tcp_sk); slow = lock_sock_fast(tcp_sk);
if (tcp_sk->sk_state != TCP_CLOSE) { if (tcp_sk->sk_state != TCP_CLOSE) {
tcp_send_active_reset(tcp_sk, GFP_ATOMIC); tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
tcp_set_state(tcp_sk, TCP_CLOSE); tcp_set_state(tcp_sk, TCP_CLOSE);
} }
release_sock(tcp_sk); unlock_sock_fast(tcp_sk, slow);
} }
inet_sk_state_store(sk, TCP_CLOSE); inet_sk_state_store(sk, TCP_CLOSE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment