Commit 3541f9e8 authored by Eric Dumazet, committed by David S. Miller

tcp: add tcp_mss_clamp() helper

Small cleanup factorizing code doing the TCP_MAXSEG clamping.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ff3edc9b
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -445,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
 
 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
 
+static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+{
+	/* We use READ_ONCE() here because socket might not be locked.
+	 * This happens for listeners.
+	 */
+	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+
+	return (user_mss && user_mss < mss) ? user_mss : mss;
+}
 #endif	/* _LINUX_TCP_H */
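The clamp this helper factorizes can be exercised outside the kernel. Below is a minimal user-space sketch, not part of this commit: the hypothetical mss_clamp() stands in for tcp_mss_clamp(), kernel u16 becomes uint16_t, and READ_ONCE() is dropped because the sketch has no concurrent writer.

#include <assert.h>
#include <stdint.h>

/* Stand-in for tcp_mss_clamp(): a user-supplied TCP_MAXSEG value
 * (user_mss) caps the MSS only when it is non-zero (i.e. actually
 * configured) and strictly smaller than the candidate value.
 */
static uint16_t mss_clamp(uint16_t user_mss, uint16_t mss)
{
	return (user_mss && user_mss < mss) ? user_mss : mss;
}

int main(void)
{
	assert(mss_clamp(0, 1460) == 1460);    /* TCP_MAXSEG unset: mss unchanged */
	assert(mss_clamp(536, 1460) == 536);   /* smaller user value wins */
	assert(mss_clamp(9000, 1460) == 1460); /* the clamp never raises the MSS */
	return 0;
}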
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1324,10 +1324,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	tcp_ca_openreq_child(newsk, dst);
 
 	tcp_sync_mss(newsk, dst_mtu(dst));
-	newtp->advmss = dst_metric_advmss(dst);
-	if (tcp_sk(sk)->rx_opt.user_mss &&
-	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
-		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
 
 	tcp_initialize_rcv_mss(newsk);
 
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -360,15 +360,12 @@ void tcp_openreq_init_rwin(struct request_sock *req,
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	const struct tcp_sock *tp = tcp_sk(sk_listener);
-	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
 	int full_space = tcp_full_space(sk_listener);
-	int mss = dst_metric_advmss(dst);
 	u32 window_clamp;
 	__u8 rcv_wscale;
+	int mss;
 
-	if (user_mss && user_mss < mss)
-		mss = user_mss;
-
+	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 	window_clamp = READ_ONCE(tp->window_clamp);
 	/* Set this up on the first call only */
 	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3062,7 +3062,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	struct sk_buff *skb;
 	int tcp_header_size;
 	struct tcphdr *th;
-	u16 user_mss;
 	int mss;
 
 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
@@ -3092,10 +3091,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	}
 	skb_dst_set(skb, dst);
 
-	mss = dst_metric_advmss(dst);
-	user_mss = READ_ONCE(tp->rx_opt.user_mss);
-	if (user_mss && user_mss < mss)
-		mss = user_mss;
+	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
@@ -3201,9 +3197,7 @@ static void tcp_connect_init(struct sock *sk)
 
 	if (!tp->window_clamp)
 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
-	tp->advmss = dst_metric_advmss(dst);
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
-		tp->advmss = tp->rx_opt.user_mss;
+	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
 	tcp_initialize_rcv_mss(sk);
 
@@ -3280,8 +3274,8 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 	 * user-MSS. Reserve maximum option space for middleboxes that add
 	 * private TCP options. The cost is reduced data space in SYN :(
 	 */
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
-		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
+	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
+
 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
 		MAX_TCP_OPTION_SPACE;
 
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1147,10 +1147,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	tcp_ca_openreq_child(newsk, dst);
 
 	tcp_sync_mss(newsk, dst_mtu(dst));
-	newtp->advmss = dst_metric_advmss(dst);
-	if (tcp_sk(sk)->rx_opt.user_mss &&
-	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
-		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
 
 	tcp_initialize_rcv_mss(newsk);
 
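For context, the user_mss consulted by tcp_mss_clamp() is set from the TCP_MAXSEG socket option. A minimal sketch, not part of this commit, of an application capping the MSS it advertises (the value 536 is only an example):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int mss = 536;	/* example cap; the kernel stores it in tp->rx_opt.user_mss */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Once set, SYN and SYN-ACK segments advertise an MSS no larger
	 * than the requested value, via the clamping factorized above.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
		perror("setsockopt(TCP_MAXSEG)");

	close(fd);
	return 0;
}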