Commit c6975d7c authored by Qian Cai, committed by Will Deacon

arm64: Track no early_pgtable_alloc() for kmemleak

After switching the page size from 64KB to 4KB on several arm64 servers here,
kmemleak starts to run out of the early memory pool due to a huge number of
early_pgtable_alloc() calls:

  kmemleak_alloc_phys()
  memblock_alloc_range_nid()
  memblock_phys_alloc_range()
  early_pgtable_alloc()
  init_pmd()
  alloc_init_pud()
  __create_pgd_mapping()
  __map_memblock()
  paging_init()
  setup_arch()
  start_kernel()

Increasing the default value of DEBUG_KMEMLEAK_MEM_POOL_SIZE by 4 times is
still not enough for a server with 200GB+ of memory. There is little interest
in checking memory leaks for those early page tables, and those early memory
mappings should not reference other memory. Hence, there are no kmemleak
false positives, and we can safely skip tracking those early allocations in
kmemleak, as was done in commit fed84c78
("mm/memblock.c: skip kmemleak for kasan_init()"), without needing to
introduce complications to automatically scale the value depending on the
runtime memory size etc. After this patch, the default value of
DEBUG_KMEMLEAK_MEM_POOL_SIZE is sufficient again.
Signed-off-by: Qian Cai <quic_qiancai@quicinc.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/20211105150509.7826-1-quic_qiancai@quicinc.com
Signed-off-by: Will Deacon <will@kernel.org>
parent aedad3e1
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -32,7 +32,7 @@ pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 static __init void *kasan_alloc_block(size_t size)
 {
 	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
-				      MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE);
+				      MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
 }
 
 static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,

--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -36,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 {
 	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
 					 __pa(MAX_DMA_ADDRESS),
-					 MEMBLOCK_ALLOC_KASAN, node);
+					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
 	if (!p)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
 		      __func__, PAGE_SIZE, PAGE_SIZE, node,
@@ -49,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
 {
 	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
 					     __pa(MAX_DMA_ADDRESS),
-					     MEMBLOCK_ALLOC_KASAN, node);
+					     MEMBLOCK_ALLOC_NOLEAKTRACE,
+					     node);
 	if (!p)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
 		      __func__, PAGE_SIZE, PAGE_SIZE, node,

--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -96,7 +96,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
 	phys_addr_t phys;
 	void *ptr;
 
-	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
+					 MEMBLOCK_ALLOC_NOLEAKTRACE);
 	if (!phys)
 		panic("Failed to allocate page table page\n");
 

--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -372,7 +372,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 /* Flags for memblock allocation APIs */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
-#define MEMBLOCK_ALLOC_KASAN		1
+#define MEMBLOCK_ALLOC_NOLEAKTRACE	1
 
 /* We are using top down, so it is safe to use 0 here */
 #define MEMBLOCK_LOW_LIMIT 0

--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -287,7 +287,7 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 {
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
-	    end == MEMBLOCK_ALLOC_KASAN)
+	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
 		end = memblock.current_limit;
 
 	/* avoid allocating the first page */
@@ -1379,8 +1379,11 @@ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 		return 0;
 
 done:
-	/* Skip kmemleak for kasan_init() due to high volume. */
-	if (end != MEMBLOCK_ALLOC_KASAN)
+	/*
+	 * Skip kmemleak for those places like kasan_init() and
+	 * early_pgtable_alloc() due to high volume.
+	 */
+	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
 		/*
 		 * The min_count is set to 0 so that memblock allocated
 		 * blocks are never reported as leaks. This is because many
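
For illustration only (not part of the commit): the sketch below shows how a
boot-time caller can use the renamed limit to keep its allocations out of
kmemleak, in the same way early_pgtable_alloc() does in the mmu.c hunk above.
The function name example_early_table_alloc() is hypothetical.

#include <linux/memblock.h>

/*
 * Hypothetical example: allocate one early page-table page without
 * registering it with kmemleak. Passing MEMBLOCK_ALLOC_NOLEAKTRACE as
 * the @end limit makes memblock_alloc_range_nid() skip the
 * kmemleak_alloc_phys() call, as shown in the mm/memblock.c hunk above.
 */
static phys_addr_t __init example_early_table_alloc(void)
{
	phys_addr_t phys;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate early table page\n");

	return phys;
}

Ordinary callers that do want their allocations leak-checked keep passing
MEMBLOCK_ALLOC_ACCESSIBLE (or an explicit physical limit) instead.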