Commit 49fb2f33 authored by Jakub Kicinski

Merge branch 'mptcp-some-fallback-fixes'

Paolo Abeni says:

====================
mptcp: some fallback fixes

packetdrill pointed out that we currently don't properly handle
some fallback scenarios for MP_JOIN subflows.

The first patch addresses that issue.

Patch 2/2 fixes a related pre-existing issue that is more
evident after 1/2: we could keep using closed subflows for
MPTCP signaling.
====================
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 16cb3653 0e4f35d7
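
Taken together, the two patches follow a defer-to-worker pattern: the reset path only flags the msk and wakes the MPTCP worker, and the worker consumes the flag and garbage-collects subflows that are already in TCP_CLOSE. The stand-alone user-space sketch below models only that pattern; it is not kernel code, and every name in it (struct conn, reset_subflow(), worker(), WORK_CLOSE_SUBFLOW) is a made-up stand-in for the msk, mptcp_subflow_reset(), mptcp_worker() and MPTCP_WORK_CLOSE_SUBFLOW seen in the hunks below.

/* User-space analogue (not kernel code) of the defer-to-worker pattern:
 * a reset only marks the subflow and sets a flag; a single worker pass
 * clears the flag and sweeps every closed subflow.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WORK_CLOSE_SUBFLOW (1u << 5)	/* stand-in for MPTCP_WORK_CLOSE_SUBFLOW */

struct conn {
	atomic_uint flags;		/* stand-in for msk->flags */
	bool subflow_closed[4];		/* toy stand-in for the subflow list */
};

/* stand-in for mptcp_subflow_reset(): mark the subflow dead and schedule
 * the sweep only if the flag was not already set
 */
static void reset_subflow(struct conn *c, int i)
{
	c->subflow_closed[i] = true;
	unsigned int old = atomic_fetch_or(&c->flags, WORK_CLOSE_SUBFLOW);
	if (!(old & WORK_CLOSE_SUBFLOW))
		printf("schedule worker (first reset since last sweep)\n");
}

/* stand-in for mptcp_worker(): consume the flag, then garbage-collect
 * every subflow marked closed
 */
static void worker(struct conn *c)
{
	unsigned int old = atomic_fetch_and(&c->flags, ~WORK_CLOSE_SUBFLOW);
	if (!(old & WORK_CLOSE_SUBFLOW))
		return;
	for (int i = 0; i < 4; i++) {
		if (c->subflow_closed[i]) {
			printf("sweeping closed subflow %d\n", i);
			c->subflow_closed[i] = false;
		}
	}
}

int main(void)
{
	struct conn c = { 0 };

	atomic_init(&c.flags, 0);
	reset_subflow(&c, 1);	/* e.g. MP_JOIN fallback -> reset */
	reset_subflow(&c, 3);	/* second reset: worker already scheduled */
	worker(&c);		/* one pass sweeps both */
	return 0;
}

As in the kernel hunks, the atomic read-modify-write ensures the sweep is scheduled at most once per batch of resets, no matter how many subflows are reset before the worker runs.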
@@ -626,6 +626,12 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
 	if (unlikely(mptcp_check_fallback(sk)))
 		return false;
 
+	/* prevent adding of any MPTCP related options on reset packet
+	 * until we support MP_TCPRST/MP_FASTCLOSE
+	 */
+	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
+		return false;
+
 	if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
 		ret = true;
 	else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
@@ -676,7 +682,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
 	return false;
 }
 
-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
 				    struct mptcp_subflow_context *subflow,
 				    struct sk_buff *skb,
 				    struct mptcp_options_received *mp_opt)
@@ -693,15 +699,20 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
 		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
 		    subflow->mp_join && mp_opt->mp_join &&
 		    READ_ONCE(msk->pm.server_side))
-			tcp_send_ack(sk);
+			tcp_send_ack(ssk);
 		goto fully_established;
 	}
 
-	/* we should process OoO packets before the first subflow is fully
-	 * established, but not expected for MP_JOIN subflows
+	/* we must process OoO packets before the first subflow is fully
+	 * established. OoO packets are instead a protocol violation
+	 * for MP_JOIN subflows as the peer must not send any data
+	 * before receiving the forth ack - cfr. RFC 8684 section 3.2.
 	 */
-	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
+	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
+		if (subflow->mp_join)
+			goto reset;
 		return subflow->mp_capable;
+	}
 
 	if (mp_opt->dss && mp_opt->use_ack) {
 		/* subflows are fully established as soon as we get any
@@ -713,9 +724,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
 	}
 
 	/* If the first established packet does not contain MP_CAPABLE + data
-	 * then fallback to TCP
+	 * then fallback to TCP. Fallback scenarios requires a reset for
+	 * MP_JOIN subflows.
 	 */
 	if (!mp_opt->mp_capable) {
+		if (subflow->mp_join)
+			goto reset;
 		subflow->mp_capable = 0;
 		pr_fallback(msk);
 		__mptcp_do_fallback(msk);
@@ -732,12 +746,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
 
 	subflow->pm_notified = 1;
 	if (subflow->mp_join) {
-		clear_3rdack_retransmission(sk);
+		clear_3rdack_retransmission(ssk);
 		mptcp_pm_subflow_established(msk, subflow);
 	} else {
 		mptcp_pm_fully_established(msk);
 	}
 	return true;
+
+reset:
+	mptcp_subflow_reset(ssk);
+	return false;
 }
 
 static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
@@ -1383,6 +1383,20 @@ static void pm_work(struct mptcp_sock *msk)
 	spin_unlock_bh(&msk->pm.lock);
 }
 
+static void __mptcp_close_subflow(struct mptcp_sock *msk)
+{
+	struct mptcp_subflow_context *subflow, *tmp;
+
+	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+		if (inet_sk_state_load(ssk) != TCP_CLOSE)
+			continue;
+
+		__mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
+	}
+}
+
 static void mptcp_worker(struct work_struct *work)
 {
 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -1400,6 +1414,9 @@ static void mptcp_worker(struct work_struct *work)
 	mptcp_clean_una(sk);
 	mptcp_check_data_fin_ack(sk);
 	__mptcp_flush_join_list(msk);
+	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		__mptcp_close_subflow(msk);
+
 	__mptcp_move_skbs(msk);
 
 	if (msk->pm.status)
@@ -90,6 +90,7 @@
 #define MPTCP_WORK_RTX		2
 #define MPTCP_WORK_EOF		3
 #define MPTCP_FALLBACK_DONE	4
+#define MPTCP_WORK_CLOSE_SUBFLOW 5
 
 struct mptcp_options_received {
 	u64	sndr_key;
@@ -348,6 +349,7 @@ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
 				     struct mptcp_options_received *mp_opt);
 bool mptcp_subflow_data_available(struct sock *sk);
 void __init mptcp_subflow_init(void);
+void mptcp_subflow_reset(struct sock *ssk);
 
 /* called with sk socket lock held */
 int __mptcp_subflow_connect(struct sock *sk, int ifindex,
@@ -270,6 +270,19 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
 	return thmac == subflow->thmac;
 }
 
+void mptcp_subflow_reset(struct sock *ssk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	struct sock *sk = subflow->conn;
+
+	tcp_set_state(ssk, TCP_CLOSE);
+	tcp_send_active_reset(ssk, GFP_ATOMIC);
+	tcp_done(ssk);
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
+	    schedule_work(&mptcp_sk(sk)->work))
+		sock_hold(sk);
+}
+
 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -342,8 +355,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 	return;
 
 do_reset:
-	tcp_send_active_reset(sk, GFP_ATOMIC);
-	tcp_done(sk);
+	mptcp_subflow_reset(sk);
 }
 
 struct request_sock_ops mptcp_subflow_request_sock_ops;