Commit dcebeb8b authored by David S. Miller

Merge branch 'mptcp-fixes'

Mat Martineau says:

====================
mptcp: A few fixes

This set has three separate changes for the net-next tree:

Patch 1 guarantees safe handling and a warning if a NULL value is
encountered when gathering subflow data for the MPTCP_SUBFLOW_ADDRS
socket option.

Patch 2 increases the default number of subflows allowed per MPTCP
connection.

Patch 3 makes an existing function 'static'.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 295711fa 3828c514
@@ -654,7 +654,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
         }
 }
 
-int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
                                  struct mptcp_addr_info *addr,
                                  u8 bkup)
 {
@@ -2052,6 +2052,9 @@ static int __net_init pm_nl_init_net(struct net *net)
         struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
 
         INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
+
+        /* Cit. 2 subflows ought to be enough for anybody. */
+        pernet->subflows_max = 2;
         pernet->next_id = 1;
         pernet->stale_loss_cnt = 4;
         spin_lock_init(&pernet->lock);
...
@@ -736,9 +736,6 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
                                const struct mptcp_rm_list *rm_list);
 void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
-int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
-                                 struct mptcp_addr_info *addr,
-                                 u8 bkup);
 void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq);
 void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
...
@@ -861,6 +861,9 @@ static void mptcp_get_sub_addrs(const struct sock *sk, struct mptcp_subflow_addr
         } else if (sk->sk_family == AF_INET6) {
                 const struct ipv6_pinfo *np = inet6_sk(sk);
 
+                if (WARN_ON_ONCE(!np))
+                        return;
+
                 a->sin6_local.sin6_family = AF_INET6;
                 a->sin6_local.sin6_port = inet->inet_sport;
 
...
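For context, the guard above sits on the kernel side of the MPTCP_SUBFLOW_ADDRS getsockopt() path. Below is a minimal, hypothetical userspace sketch of how that option is typically queried, modeled on the pattern used by the in-tree MPTCP selftests. It is not part of this patch set: it assumes a connected IPPROTO_MPTCP socket in 'fd', a 5.16-era <linux/mptcp.h> providing struct mptcp_subflow_data and struct mptcp_subflow_addrs, and it falls back to defining SOL_MPTCP as 284 if the toolchain headers lack it; check those details against your installed headers.

/*
 * Hypothetical sketch, not from this series: list the subflows of a
 * connected MPTCP socket via getsockopt(MPTCP_SUBFLOW_ADDRS).
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mptcp.h>

#ifndef SOL_MPTCP
#define SOL_MPTCP 284   /* assumption: option level used by current kernels */
#endif

static int dump_subflow_addrs(int fd)
{
        struct {
                struct mptcp_subflow_data d;
                struct mptcp_subflow_addrs addr[8];   /* arbitrary room for 8 subflows */
        } a;
        socklen_t olen = sizeof(a);

        memset(&a, 0, sizeof(a));
        a.d.size_subflow_data = sizeof(struct mptcp_subflow_data);
        a.d.size_user = sizeof(struct mptcp_subflow_addrs);

        /* The kernel fills num_subflows and one addr[] entry per subflow. */
        if (getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &a, &olen) < 0) {
                perror("getsockopt(MPTCP_SUBFLOW_ADDRS)");
                return -1;
        }

        printf("subflows reported: %u\n", a.d.num_subflows);
        return 0;
}

With the fix above, an IPv6 subflow that unexpectedly has no ipv6_pinfo now triggers a one-time warning and is left zeroed in the result instead of being dereferenced while this query is serviced.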
@@ -945,12 +945,15 @@ subflows_tests()
 
         # subflow limited by client
         reset
+        ip netns exec $ns1 ./pm_nl_ctl limits 0 0
+        ip netns exec $ns2 ./pm_nl_ctl limits 0 0
         ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
         run_tests $ns1 $ns2 10.0.1.1
         chk_join_nr "single subflow, limited by client" 0 0 0
 
         # subflow limited by server
         reset
+        ip netns exec $ns1 ./pm_nl_ctl limits 0 0
         ip netns exec $ns2 ./pm_nl_ctl limits 0 1
         ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
         run_tests $ns1 $ns2 10.0.1.1
@@ -973,7 +976,7 @@ subflows_tests()
         run_tests $ns1 $ns2 10.0.1.1
         chk_join_nr "multiple subflows" 2 2 2
 
-        # multiple subflows limited by serverf
+        # multiple subflows limited by server
         reset
         ip netns exec $ns1 ./pm_nl_ctl limits 0 1
         ip netns exec $ns2 ./pm_nl_ctl limits 0 2
...
@@ -70,7 +70,7 @@ check()
 check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "defaults addr list"
 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
-subflows 0" "defaults limits"
+subflows 2" "defaults limits"
 
 ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.1
 ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.2 flags subflow dev lo
@@ -118,11 +118,11 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "flush addrs"
 ip netns exec $ns1 ./pm_nl_ctl limits 9 1
 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
-subflows 0" "rcv addrs above hard limit"
+subflows 2" "rcv addrs above hard limit"
 
 ip netns exec $ns1 ./pm_nl_ctl limits 1 9
 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
-subflows 0" "subflows above hard limit"
+subflows 2" "subflows above hard limit"
 
 ip netns exec $ns1 ./pm_nl_ctl limits 8 8
 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8
...