Commit 0784c25d authored by Jakub Kicinski

Merge branch 'mptcp-miscellaneous-fixes-and-a-new-test-case'

Mat Martineau says:

====================
mptcp: Miscellaneous fixes and a new test case

Patches 1 and 3 remove helpers that were iterating over the subflow
connection list without proper locking. Iteration was not needed in
either case.

Patch 2 fixes handling of MP_FAIL timeout, checking for orphaned
subflows instead of using the MPTCP socket data lock and connection
state.

Patch 4 adds a test for MP_FAIL timeout using tc pedit to induce checksum
failures.
====================

Link: https://lore.kernel.org/r/20220518220446.209750-1-mathew.j.martineau@linux.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 9ad084d6 2ba18161
@@ -303,7 +303,7 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 
 	pr_debug("fail_seq=%llu", fail_seq);
 
-	if (mptcp_has_another_subflow(sk) || !READ_ONCE(msk->allow_infinite_fallback))
+	if (!READ_ONCE(msk->allow_infinite_fallback))
 		return;
 
 	if (!READ_ONCE(subflow->mp_fail_response_expect)) {
@@ -312,13 +312,10 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 		subflow->send_mp_fail = 1;
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
 		subflow->send_infinite_map = 1;
-	} else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+	} else if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("MP_FAIL response received");
 
-		mptcp_data_lock(s);
-		if (inet_sk_state_load(s) != TCP_CLOSE)
-			sk_stop_timer(s, &s->sk_timer);
-		mptcp_data_unlock(s);
+		sk_stop_timer(s, &s->sk_timer);
 	}
 }
...
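Patch 2's approach, visible in the hunk above, is to gate sk_stop_timer() on whether the subflow socket has been orphaned (SOCK_DEAD) instead of taking the MPTCP data lock and re-checking the connection state. A loose, hypothetical userspace analogy of that pattern, checking a "dead" flag before touching a shared timer rather than locking and re-checking state (demo code only, not the kernel implementation; link with -lrt on older glibc):

#include <signal.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct conn {
        atomic_bool dead;       /* stands in for sock_flag(sk, SOCK_DEAD) */
        timer_t fail_timer;     /* stands in for the timer armed for the MP_FAIL timeout */
};

/* Fires if no MP_FAIL response arrives in time. */
static void fail_timeout(union sigval sv)
{
        (void)sv;
        printf("MP_FAIL response timed out\n");
}

/* Models the "MP_FAIL response received" branch: skip the timer entirely
 * when the connection is already marked dead, otherwise just stop it. */
static void mp_fail_response_received(struct conn *c)
{
        if (atomic_load(&c->dead))
                return;
        timer_delete(c->fail_timer);    /* stands in for sk_stop_timer() */
        printf("MP_FAIL response received, timeout cancelled\n");
}

int main(void)
{
        struct conn c;
        struct sigevent sev = {
                .sigev_notify = SIGEV_THREAD,
                .sigev_notify_function = fail_timeout,
        };
        struct itimerspec timeout = { .it_value = { .tv_sec = 60 } };

        atomic_init(&c.dead, false);
        timer_create(CLOCK_MONOTONIC, &sev, &c.fail_timer);
        timer_settime(c.fail_timer, 0, &timeout, NULL); /* arm, like sk_reset_timer() */

        mp_fail_response_received(&c);  /* the response wins the race here */
        return 0;
}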
@@ -2190,23 +2190,10 @@ mp_fail_response_expect_subflow(struct mptcp_sock *msk)
 	return ret;
 }
 
-static void mptcp_check_mp_fail_response(struct mptcp_sock *msk)
-{
-	struct mptcp_subflow_context *subflow;
-	struct sock *sk = (struct sock *)msk;
-
-	bh_lock_sock(sk);
-	subflow = mp_fail_response_expect_subflow(msk);
-	if (subflow)
-		__set_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags);
-	bh_unlock_sock(sk);
-}
-
 static void mptcp_timeout_timer(struct timer_list *t)
 {
 	struct sock *sk = from_timer(sk, t, sk_timer);
 
-	mptcp_check_mp_fail_response(mptcp_sk(sk));
 	mptcp_schedule_work(sk);
 	sock_put(sk);
 }
@@ -2588,7 +2575,6 @@ static void mptcp_worker(struct work_struct *work)
 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
 		__mptcp_retrans(sk);
 
-	if (test_and_clear_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags))
-		mptcp_mp_fail_no_response(msk);
+	mptcp_mp_fail_no_response(msk);
 
 unlock:
...
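With the protocol.c change above, the msk timer callback does nothing but schedule the worker, and the MP_FAIL-no-response check now runs from mptcp_worker(), which holds the msk socket lock, so the bh_lock_sock()-based helper could go away. A rough, hypothetical userspace sketch of that split, where a "timer context" only wakes a worker and the worker does the locked work (demo only, compile with -pthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        pthread_mutex_t lock;   /* models the msk socket lock */
        sem_t kick;             /* models mptcp_schedule_work() */
        bool fail_expected;     /* a subflow is still waiting for an MP_FAIL response */
};

/* "Timer context": may not take the heavy lock, so it only wakes the
 * worker, which is all mptcp_timeout_timer() does after the change. */
static void timeout_fired(struct conn *c)
{
        sem_post(&c->kick);
}

/* Worker: everything that needs the lock, including the
 * MP_FAIL-no-response check, happens here. */
static void *worker(void *arg)
{
        struct conn *c = arg;

        sem_wait(&c->kick);
        pthread_mutex_lock(&c->lock);
        if (c->fail_expected)
                printf("no MP_FAIL response in time: reset the failing subflow\n");
        pthread_mutex_unlock(&c->lock);
        return NULL;
}

int main(void)
{
        struct conn c = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .fail_expected = true,
        };
        pthread_t t;

        sem_init(&c.kick, 0, 0);
        pthread_create(&t, NULL, worker, &c);
        timeout_fired(&c);      /* timer expiry just kicks the worker */
        pthread_join(t, NULL);
        sem_destroy(&c.kick);
        return 0;
}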
@@ -117,7 +117,6 @@
 #define MPTCP_WORK_EOF		3
 #define MPTCP_FALLBACK_DONE	4
 #define MPTCP_WORK_CLOSE_SUBFLOW 5
-#define MPTCP_FAIL_NO_RESPONSE	6
 
 /* MPTCP socket release cb flags */
 #define MPTCP_PUSH_PENDING	1
@@ -650,19 +649,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
 	inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
 }
 
-static inline bool mptcp_has_another_subflow(struct sock *ssk)
-{
-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk), *tmp;
-	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
-	mptcp_for_each_subflow(msk, tmp) {
-		if (tmp != subflow)
-			return true;
-	}
-
-	return false;
-}
-
 void __init mptcp_proto_init(void);
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 int __init mptcp_proto_v6_init(void);
...
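The inline helper deleted above walked the msk's subflow list from contexts that did not hold the lock protecting that list, which is what the cover letter calls out; its callers now read a single flag, msk->allow_infinite_fallback, with READ_ONCE() instead (see the pm.c and subflow.c hunks). A minimal, hypothetical userspace sketch of the same idea, keeping a flag up to date when the list changes so readers need one atomic load rather than a traversal (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct subflow {
        struct subflow *next;
};

struct conn {
        pthread_mutex_t list_lock;      /* protects subflows and nr_subflows */
        struct subflow *subflows;
        int nr_subflows;
        atomic_bool single_subflow;     /* written under list_lock, read locklessly */
};

static void conn_add_subflow(struct conn *c, struct subflow *sf)
{
        pthread_mutex_lock(&c->list_lock);
        sf->next = c->subflows;
        c->subflows = sf;
        c->nr_subflows++;
        atomic_store(&c->single_subflow, c->nr_subflows == 1);
        pthread_mutex_unlock(&c->list_lock);
}

/* Lock-free reader: no list walk, just one atomic load
 * (the moral equivalent of READ_ONCE(msk->allow_infinite_fallback)). */
static bool conn_can_fallback(struct conn *c)
{
        return atomic_load(&c->single_subflow);
}

int main(void)
{
        struct conn c = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
        struct subflow a = { 0 }, b = { 0 };

        atomic_init(&c.single_subflow, false);
        conn_add_subflow(&c, &a);
        printf("one subflow, fallback allowed: %d\n", conn_can_fallback(&c));
        conn_add_subflow(&c, &b);
        printf("two subflows, fallback allowed: %d\n", conn_can_fallback(&c));
        return 0;
}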
@@ -1016,12 +1016,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 		pr_debug("infinite mapping received");
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
 		subflow->map_data_len = 0;
-		if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
-			mptcp_data_lock(sk);
-			if (inet_sk_state_load(sk) != TCP_CLOSE)
-				sk_stop_timer(sk, &sk->sk_timer);
-			mptcp_data_unlock(sk);
-		}
+		if (!sock_flag(ssk, SOCK_DEAD))
+			sk_stop_timer(sk, &sk->sk_timer);
+
 		return MAPPING_INVALID;
 	}
@@ -1233,8 +1230,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	if (!__mptcp_check_fallback(msk)) {
 		/* RFC 8684 section 3.7. */
 		if (subflow->send_mp_fail) {
-			if (mptcp_has_another_subflow(ssk) ||
-			    !READ_ONCE(msk->allow_infinite_fallback)) {
+			if (!READ_ONCE(msk->allow_infinite_fallback)) {
 				ssk->sk_err = EBADMSG;
 				tcp_set_state(ssk, TCP_CLOSE);
 				subflow->reset_transient = 0;
@@ -1242,9 +1238,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
 				tcp_send_active_reset(ssk, GFP_ATOMIC);
 				while ((skb = skb_peek(&ssk->sk_receive_queue)))
 					sk_eat_skb(ssk, skb);
-			} else {
+			} else if (!sock_flag(ssk, SOCK_DEAD)) {
 				WRITE_ONCE(subflow->mp_fail_response_expect, true);
-				/* The data lock is acquired in __mptcp_move_skbs() */
 				sk_reset_timer((struct sock *)msk,
 					       &((struct sock *)msk)->sk_timer,
 					       jiffies + TCP_RTO_MAX);
...
@@ -2732,6 +2732,16 @@ fail_tests()
 		chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
 		chk_fail_nr 1 -1 invert
 	fi
+
+	# multiple subflows
+	if reset_with_fail "MP_FAIL MP_RST" 2; then
+		tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5
+		pm_nl_set_limits $ns1 0 1
+		pm_nl_set_limits $ns2 0 1
+		pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+		run_tests $ns1 $ns2 10.0.1.1 1024
+		chk_join_nr 1 1 1 1 0 1 1 0 "$(pedit_action_pkts)"
+	fi
 }
 
 userspace_tests()
...
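The new test case above depends on a tc pedit action corrupting payload bytes in flight, per the cover letter's "tc pedit to induce checksum failures", so the receiver's MPTCP checksum validation fails and MP_FAIL is sent. The DSS checksum is the standard ones'-complement Internet checksum, so a single corrupted byte always produces a mismatch; a small standalone C demonstration of that property (illustrative only, not the kernel's checksum code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Ones'-complement (Internet-style) checksum over a byte buffer. */
static uint16_t csum16(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)data[i] << 8 | data[i + 1];
        if (len & 1)
                sum += (uint32_t)data[len - 1] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t payload[32] = "mptcp test payload";
        uint16_t good, bad;

        good = csum16(payload, sizeof(payload));
        payload[5] ^= 0xff;     /* corrupt one byte, as the pedit action does in flight */
        bad = csum16(payload, sizeof(payload));

        printf("checksum before: 0x%04x, after corruption: 0x%04x (%s)\n",
               good, bad, good == bad ? "undetected!" : "mismatch detected -> MP_FAIL");
        return 0;
}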