Commit fee83d09 authored by Haishuang Yan, committed by David S. Miller

ipv4: Namespaceify tcp_max_syn_backlog knob

Applications in different network namespaces might require a different maximal
number of remembered connection requests.
Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1946e672
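
Not part of the commit itself, but a minimal userspace sketch of what the change enables: entries under /proc/sys/net are resolved against the opening process's network namespace, so after this patch each namespace should expose and keep its own tcp_max_syn_backlog. The program assumes it is run as root (CAP_SYS_ADMIN and CAP_NET_ADMIN) and is illustration only.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static void show_backlog(const char *label)
{
        /* /proc/sys/net reflects the reader's network namespace. */
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_max_syn_backlog", "r");
        int val = -1;

        if (f) {
                if (fscanf(f, "%d", &val) != 1)
                        val = -1;
                fclose(f);
        }
        printf("%s: tcp_max_syn_backlog = %d\n", label, val);
}

int main(void)
{
        FILE *f;

        show_backlog("init netns");

        /* Enter a fresh network namespace; with this commit its TCP sysctls
         * start from the per-namespace defaults set in tcp_sk_init(). */
        if (unshare(CLONE_NEWNET) != 0) {
                perror("unshare(CLONE_NEWNET)");
                return 1;
        }
        show_backlog("new netns");

        /* Writing here should no longer disturb the init namespace. */
        f = fopen("/proc/sys/net/ipv4/tcp_max_syn_backlog", "w");
        if (f) {
                fprintf(f, "4096\n");
                fclose(f);
        }
        show_backlog("new netns after write");
        return 0;
}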
@@ -122,6 +122,7 @@ struct netns_ipv4 {
 	unsigned int sysctl_tcp_notsent_lowat;
 	int sysctl_tcp_tw_reuse;
 	struct inet_timewait_death_row tcp_death_row;
+	int sysctl_max_syn_backlog;
 	int sysctl_igmp_max_memberships;
 	int sysctl_igmp_max_msf;
......
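
With the knob stored in struct netns_ipv4, kernel code reaches it through the owning namespace instead of a global. A minimal sketch of the access pattern (the helper name syn_backlog_for() is invented here; sock_net() is the standard accessor from include/net/sock.h):

/* Illustrative only: read the per-namespace value for a given socket. */
static int syn_backlog_for(const struct sock *sk)
{
        return sock_net(sk)->ipv4.sysctl_max_syn_backlog;
}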
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
@@ -123,8 +123,6 @@ static inline void reqsk_put(struct request_sock *req)
 	reqsk_free(req);
 }
-extern int sysctl_max_syn_backlog;
 /*
  * For a TCP Fast Open listener -
  *  lock - protects the access to all the reqsk, which is co-owned by
......
@@ -34,8 +34,6 @@
  * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
-int sysctl_max_syn_backlog = 256;
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
......
@@ -323,13 +323,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
-	{
-		.procname	= "tcp_max_syn_backlog",
-		.data		= &sysctl_max_syn_backlog,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
 	{
 		.procname	= "inet_peer_threshold",
 		.data		= &inet_peer_threshold,
@@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "tcp_max_syn_backlog",
+		.data		= &init_net.ipv4.sysctl_max_syn_backlog,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	{
 		.procname	= "fib_multipath_use_neigh",
......
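
The new ipv4_net_table entry points its .data at init_net's copy of the field; for every other namespace the per-netns sysctl setup duplicates the table and rebases each .data pointer by the offset between that namespace and init_net, so reads and writes land in the right struct netns_ipv4. The sketch below is a paraphrase from memory of that existing mechanism in net/ipv4/sysctl_net_ipv4.c (the function name is invented), not part of this diff:

/* Sketch: give a non-init namespace its own copy of ipv4_net_table with
 * every .data pointer shifted from init_net's fields to this net's fields,
 * including the new &init_net.ipv4.sysctl_max_syn_backlog entry. */
static struct ctl_table *example_clone_ipv4_net_table(struct net *net)
{
        struct ctl_table *table;
        int i;

        table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
        if (!table)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
                table[i].data += (void *)net - (void *)&init_net;

        return table;
}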
@@ -3378,9 +3378,7 @@ void __init tcp_init(void)
 	cnt = tcp_hashinfo.ehash_mask + 1;
 	sysctl_tcp_max_orphans = cnt / 2;
-	sysctl_max_syn_backlog = max(128, cnt / 256);
 	tcp_init_mem();
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
......
@@ -6377,8 +6377,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	}
 	/* Kill the following clause, if you dislike this way. */
 	else if (!net->ipv4.sysctl_tcp_syncookies &&
-		 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-		  (sysctl_max_syn_backlog >> 2)) &&
+		 (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+		  (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
 		 !tcp_peer_is_proven(req, dst, false,
 				     tmp_opt.saw_tstamp)) {
 		/* Without syncookies last quarter of
......
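
The rewritten condition preserves the existing policy, now evaluated per namespace: with syncookies disabled, once fewer than a quarter of the SYN backlog slots remain, requests from peers without a proven timestamp history are dropped. A small standalone illustration of the arithmetic (the numbers are made up for the example):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the check in tcp_conn_request(): drop when the remaining room
 * in the SYN backlog falls below one quarter of the configured maximum. */
static bool last_quarter_reached(int max_syn_backlog, int queue_len)
{
        return max_syn_backlog - queue_len < (max_syn_backlog >> 2);
}

int main(void)
{
        int backlog = 1024;     /* e.g. the per-namespace sysctl value */

        printf("qlen 700 -> drop unproven peers: %d\n",
               last_quarter_reached(backlog, 700));     /* 0: 324 slots left */
        printf("qlen 800 -> drop unproven peers: %d\n",
               last_quarter_reached(backlog, 800));     /* 1: only 224 left */
        return 0;
}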
@@ -2419,7 +2419,7 @@ static void __net_exit tcp_sk_exit(struct net *net)
 static int __net_init tcp_sk_init(struct net *net)
 {
-	int res, cpu;
+	int res, cpu, cnt;
 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
 	if (!net->ipv4.tcp_sk)
@@ -2458,10 +2458,13 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
 	net->ipv4.sysctl_tcp_tw_reuse = 0;
+	cnt = tcp_hashinfo.ehash_mask + 1;
 	net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
-	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (tcp_hashinfo.ehash_mask + 1) / 2;
+	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
+	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
 	return 0;
 fail:
 	tcp_sk_exit(net);
......
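
tcp_sk_init() now gives every namespace the default that tcp_init() used to compute once at boot: max(128, cnt / 256), where cnt is the number of established-hash buckets (still shared machine-wide). A standalone illustration of what that formula yields for a few hypothetical hash sizes:

#include <stdio.h>

/* Same formula as tcp_sk_init(): the default SYN backlog scales with the
 * established-hash size but never drops below 128. */
static int default_max_syn_backlog(unsigned int ehash_entries)
{
        unsigned int scaled = ehash_entries / 256;

        return scaled > 128 ? (int)scaled : 128;
}

int main(void)
{
        unsigned int sizes[] = { 8192, 65536, 524288 };   /* hypothetical */

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("ehash entries %7u -> tcp_max_syn_backlog default %d\n",
                       sizes[i], default_max_syn_backlog(sizes[i]));
        return 0;
}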