Commit 9804985b authored by Kuniyuki Iwashima, committed by David S. Miller

udp: Introduce optional per-netns hash table.

The maximum hash table size is 64K due to the nature of the protocol. [0]
It's smaller than TCP's, so fewer sockets are needed to cause a performance drop.

On an EC2 c5.24xlarge instance (192 GiB memory), after running iperf3 in
a different netns, creating 32Mi sockets without data transfer in the root
netns causes a regression for the iperf3 connection.

  uhash_entries		sockets		length		Gbps
	    64K		      1		     1		5.69
			    1Mi		    16		5.27
			    2Mi		    32		4.90
			    4Mi		    64		4.09
			    8Mi		   128		2.96
			   16Mi		   256		2.06
			   32Mi		   512		1.12

The per-netns hash table breaks the lengthy lists into shorter ones.  It is
useful on a multi-tenant system with thousands of netns.  With smaller hash
tables, we can look up sockets faster, isolate noisy neighbours, and reduce
lock contention.

The max size of the per-netns table is 64K as well.  This is because the
hash range produced by udp_hashfn() always fits within a 64K window for a
given netns, so buckets beyond 64K could never be used.

  /* 0 < num < 64K  ->  X < hash < X + 64K */
  (num + net_hash_mix(net)) & mask;
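
As a rough illustration (a standalone user-space sketch, not kernel code;
the 128K bucket count and the constant standing in for net_hash_mix() are
made up for the example), a table larger than 64K leaves most of its
buckets unreachable from any single netns:

  #include <stdbool.h>
  #include <stdio.h>

  int main(void)
  {
  	enum { BUCKETS = 128 * 1024 };	/* hypothetical table larger than 64K */
  	static bool used[BUCKETS];
  	unsigned int mask = BUCKETS - 1;
  	unsigned int mix = 0xdeadbeef;	/* stands in for net_hash_mix(net) */
  	unsigned int num, distinct = 0;

  	/* num is derived from a 16-bit port, so it spans at most 64K values. */
  	for (num = 0; num < 65536; num++) {
  		unsigned int hash = (num + mix) & mask;

  		if (!used[hash]) {
  			used[hash] = true;
  			distinct++;
  		}
  	}

  	/* Prints "65536 of 131072": only a 64K-wide window is ever touched. */
  	printf("%u of %u buckets reachable in one netns\n", distinct, BUCKETS);
  	return 0;
  }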

Also, the min size is 128.  We use a bitmap to search for an available
port in udp_lib_get_port().  To keep the bitmap on the stack and avoid
triggering the CONFIG_FRAME_WARN build-time warning, we round the table
size up to at least 128.
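
For a sense of the numbers (a back-of-the-envelope user-space sketch; the
sizes other than 128 are hypothetical comparison points), the bitmap covers
MAX_UDP_PORTS / table_size ports per hash chain, so a smaller minimum would
blow up the stack frame:

  #include <stdio.h>

  #define MAX_UDP_PORTS 65536

  int main(void)
  {
  	/* 2 and 256 are arbitrary comparison points; 128 is the per-netns
  	 * minimum chosen here.
  	 */
  	unsigned int sizes[] = { 2, 128, 256 };

  	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
  		unsigned int bits = MAX_UDP_PORTS / sizes[i];

  		printf("%3u buckets -> %5u-bit bitmap (%4u bytes on the stack)\n",
  		       sizes[i], bits, bits / 8);
  	}

  	return 0;
  }

With 128 buckets the bitmap is 512 bits (64 bytes), comfortably below the
usual CONFIG_FRAME_WARN thresholds of 1024 or 2048 bytes, whereas a tiny
table would need a multi-KiB bitmap.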

The sysctl usage is the same as with TCP:

  $ dmesg | cut -d ' ' -f 6- | grep "UDP hash"
  UDP hash table entries: 65536 (order: 9, 2097152 bytes, vmalloc)

  # sysctl net.ipv4.udp_hash_entries
  net.ipv4.udp_hash_entries = 65536  # can be changed by uhash_entries

  # sysctl net.ipv4.udp_child_hash_entries
  net.ipv4.udp_child_hash_entries = 0  # disabled by default

  # ip netns add test1
  # ip netns exec test1 sysctl net.ipv4.udp_hash_entries
  net.ipv4.udp_hash_entries = -65536  # share the global table

  # sysctl -w net.ipv4.udp_child_hash_entries=100
  net.ipv4.udp_child_hash_entries = 100

  # ip netns add test2
  # ip netns exec test2 sysctl net.ipv4.udp_hash_entries
  net.ipv4.udp_hash_entries = 128  # own a per-netns table with 2^n buckets

We could optimise the hash table lookup/iteration further by removing
the netns comparison for the per-netns one in the future.  Also, we
could optimise the sparse udp_hslot layout by putting it in udp_table.

[0]: https://lore.kernel.org/netdev/4ACC2815.7010101@gmail.com/

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba6aac15
@@ -1177,6 +1177,33 @@ udp_rmem_min - INTEGER
udp_wmem_min - INTEGER
	UDP does not have tx memory accounting and this tunable has no effect.
udp_hash_entries - INTEGER
	Show the number of hash buckets for UDP sockets in the current
	networking namespace.

	A negative value means the networking namespace does not own its
	hash buckets and shares the initial networking namespace's one.

udp_child_hash_entries - INTEGER
	Control the number of hash buckets for UDP sockets in the child
	networking namespace, which must be set before clone() or unshare().

	If the value is not 0, the kernel uses a value rounded up to 2^n
	as the actual hash bucket size.  0 is a special value, meaning
	the child networking namespace will share the initial networking
	namespace's hash buckets.

	Note that the child will use the global one in case the kernel
	fails to allocate enough memory.  In addition, the global hash
	buckets are spread over available NUMA nodes, but the allocation
	of the child hash table depends on the current process's NUMA
	policy, which could result in performance differences.

	Possible values: 0, 2^n (n: 7 (128) - 16 (64K))

	Default: 0
RAW variables
=============
......
@@ -23,7 +23,9 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
	return (struct udphdr *)skb_transport_header(skb);
}

#define UDP_HTABLE_SIZE_MIN_PERNET 128
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
#define UDP_HTABLE_SIZE_MAX 65536

static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
......
@@ -208,6 +208,8 @@ struct netns_ipv4 {
	atomic_t dev_addr_genid;

	unsigned int sysctl_udp_child_hash_entries;

#ifdef CONFIG_SYSCTL
	unsigned long *sysctl_local_reserved_ports;
	int sysctl_ip_prot_sock;
......
@@ -40,6 +40,7 @@ static int one_day_secs = 24 * 3600;
static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
	FIB_MULTIPATH_HASH_FIELD_ALL_MASK;
static unsigned int tcp_child_ehash_entries_max = 16 * 1024 * 1024;
static unsigned int udp_child_hash_entries_max = UDP_HTABLE_SIZE_MAX;
static int tcp_plb_max_rounds = 31;
static int tcp_plb_max_cong_thresh = 256;
@@ -402,12 +403,36 @@ static int proc_tcp_ehash_entries(struct ctl_table *table, int write,
	if (!net_eq(net, &init_net) && !hinfo->pernet)
		tcp_ehash_entries *= -1;

	memset(&tbl, 0, sizeof(tbl));
	tbl.data = &tcp_ehash_entries;
	tbl.maxlen = sizeof(int);

	return proc_dointvec(&tbl, write, buffer, lenp, ppos);
}

static int proc_udp_hash_entries(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = container_of(table->data, struct net,
				       ipv4.sysctl_udp_child_hash_entries);
	int udp_hash_entries;
	struct ctl_table tbl;

	udp_hash_entries = net->ipv4.udp_table->mask + 1;

	/* A negative number indicates that the child netns
	 * shares the global udp_table.
	 */
	if (!net_eq(net, &init_net) && net->ipv4.udp_table == &udp_table)
		udp_hash_entries *= -1;

	memset(&tbl, 0, sizeof(tbl));
	tbl.data = &udp_hash_entries;
	tbl.maxlen = sizeof(int);

	return proc_dointvec(&tbl, write, buffer, lenp, ppos);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
					  void *buffer, size_t *lenp,

@@ -1361,6 +1386,21 @@ static struct ctl_table ipv4_net_table[] = {
		.extra1 = SYSCTL_ZERO,
		.extra2 = &tcp_child_ehash_entries_max,
	},
	{
		.procname = "udp_hash_entries",
		.data = &init_net.ipv4.sysctl_udp_child_hash_entries,
		.mode = 0444,
		.proc_handler = proc_udp_hash_entries,
	},
	{
		.procname = "udp_child_hash_entries",
		.data = &init_net.ipv4.sysctl_udp_child_hash_entries,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &udp_child_hash_entries_max,
	},
	{
		.procname = "udp_rmem_min",
		.data = &init_net.ipv4.sysctl_udp_rmem_min,
......
@@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS 65536
-#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
+#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)

static struct udp_table *udp_get_table_prot(struct sock *sk)
{

@@ -3277,7 +3277,7 @@ void __init udp_table_init(struct udp_table *table, const char *name)
				&table->log,
				&table->mask,
				UDP_HTABLE_SIZE_MIN,
-				64 * 1024);
+				UDP_HTABLE_SIZE_MAX);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
@@ -3302,22 +3302,111 @@ u32 udp_flow_hashrnd(void)
}
EXPORT_SYMBOL(udp_flow_hashrnd);

-static int __net_init udp_sysctl_init(struct net *net)
+static void __net_init udp_sysctl_init(struct net *net)
{
-	net->ipv4.udp_table = &udp_table;
	net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
	net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries)
{
	struct udp_table *udptable;
	int i;

	udptable = kmalloc(sizeof(*udptable), GFP_KERNEL);
	if (!udptable)
		goto out;

	udptable->hash = vmalloc_huge(hash_entries * 2 * sizeof(struct udp_hslot),
				      GFP_KERNEL_ACCOUNT);
	if (!udptable->hash)
		goto free_table;

	udptable->hash2 = udptable->hash + hash_entries;
	udptable->mask = hash_entries - 1;
	udptable->log = ilog2(hash_entries);

	for (i = 0; i < hash_entries; i++) {
		INIT_HLIST_HEAD(&udptable->hash[i].head);
		udptable->hash[i].count = 0;
		spin_lock_init(&udptable->hash[i].lock);

		INIT_HLIST_HEAD(&udptable->hash2[i].head);
		udptable->hash2[i].count = 0;
		spin_lock_init(&udptable->hash2[i].lock);
	}

	return udptable;

free_table:
	kfree(udptable);
out:
	return NULL;
}

static void __net_exit udp_pernet_table_free(struct net *net)
{
	struct udp_table *udptable = net->ipv4.udp_table;

	if (udptable == &udp_table)
		return;

	kvfree(udptable->hash);
	kfree(udptable);
}

static void __net_init udp_set_table(struct net *net)
{
	struct udp_table *udptable;
	unsigned int hash_entries;
	struct net *old_net;

	if (net_eq(net, &init_net))
		goto fallback;

	old_net = current->nsproxy->net_ns;
	hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries);
	if (!hash_entries)
		goto fallback;

	/* Set min to keep the bitmap on stack in udp_lib_get_port() */
	if (hash_entries < UDP_HTABLE_SIZE_MIN_PERNET)
		hash_entries = UDP_HTABLE_SIZE_MIN_PERNET;
	else
		hash_entries = roundup_pow_of_two(hash_entries);

	udptable = udp_pernet_table_alloc(hash_entries);
	if (udptable) {
		net->ipv4.udp_table = udptable;
	} else {
		pr_warn("Failed to allocate UDP hash table (entries: %u) "
			"for a netns, fallback to the global one\n",
			hash_entries);
fallback:
		net->ipv4.udp_table = &udp_table;
	}
}

static int __net_init udp_pernet_init(struct net *net)
{
	udp_sysctl_init(net);
	udp_set_table(net);

	return 0;
}

static void __net_exit udp_pernet_exit(struct net *net)
{
	udp_pernet_table_free(net);
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
-	.init = udp_sysctl_init,
+	.init = udp_pernet_init,
	.exit = udp_pernet_exit,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
......