Commit 3749a8f0 authored by Pavel Tatashin's avatar Pavel Tatashin Committed by Linus Torvalds

mm: zero hash tables in allocator

Add a new flag HASH_ZERO which, when provided, guarantees that the hash
table that is returned by alloc_large_system_hash() is zeroed.  In most
cases that is what is needed by the caller.  Use page level allocator's
__GFP_ZERO flag to zero the memory.  It uses memset(), which is an
efficient method of zeroing memory and is optimized for most platforms.

Link: http://lkml.kernel.org/r/1488432825-92126-3-git-send-email-pasha.tatashin@oracle.com
Signed-off-by: default avatarPavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: default avatarBabu Moger <babu.moger@oracle.com>
Cc: David Miller <davem@davemloft.net>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f7fb506f
...@@ -358,6 +358,7 @@ extern void *alloc_large_system_hash(const char *tablename, ...@@ -358,6 +358,7 @@ extern void *alloc_large_system_hash(const char *tablename,
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ #define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min #define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
* shift passed via *_hash_shift */ * shift passed via *_hash_shift */
#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have /* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space. * sufficient vmalloc space.
......
...@@ -7198,6 +7198,7 @@ void *__init alloc_large_system_hash(const char *tablename, ...@@ -7198,6 +7198,7 @@ void *__init alloc_large_system_hash(const char *tablename,
unsigned long long max = high_limit; unsigned long long max = high_limit;
unsigned long log2qty, size; unsigned long log2qty, size;
void *table = NULL; void *table = NULL;
gfp_t gfp_flags;
/* allow the kernel cmdline to have a say */ /* allow the kernel cmdline to have a say */
if (!numentries) { if (!numentries) {
...@@ -7242,12 +7243,17 @@ void *__init alloc_large_system_hash(const char *tablename, ...@@ -7242,12 +7243,17 @@ void *__init alloc_large_system_hash(const char *tablename,
log2qty = ilog2(numentries); log2qty = ilog2(numentries);
/*
* memblock allocator returns zeroed memory already, so HASH_ZERO is
* currently not used when HASH_EARLY is specified.
*/
gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
do { do {
size = bucketsize << log2qty; size = bucketsize << log2qty;
if (flags & HASH_EARLY) if (flags & HASH_EARLY)
table = memblock_virt_alloc_nopanic(size, 0); table = memblock_virt_alloc_nopanic(size, 0);
else if (hashdist) else if (hashdist)
table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
else { else {
/* /*
* If bucketsize is not a power-of-two, we may free * If bucketsize is not a power-of-two, we may free
...@@ -7255,8 +7261,8 @@ void *__init alloc_large_system_hash(const char *tablename, ...@@ -7255,8 +7261,8 @@ void *__init alloc_large_system_hash(const char *tablename,
* alloc_pages_exact() automatically does * alloc_pages_exact() automatically does
*/ */
if (get_order(size) < MAX_ORDER) { if (get_order(size) < MAX_ORDER) {
table = alloc_pages_exact(size, GFP_ATOMIC); table = alloc_pages_exact(size, gfp_flags);
kmemleak_alloc(table, size, 1, GFP_ATOMIC); kmemleak_alloc(table, size, 1, gfp_flags);
} }
} }
} while (!table && size > PAGE_SIZE && --log2qty); } while (!table && size > PAGE_SIZE && --log2qty);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment