Commit 2a0b5c0d authored by Catalin Marinas

arm64: Align less than PAGE_SIZE pgds naturally

When the pgd size is smaller than PAGE_SIZE, pgd_alloc() uses kzalloc()
to save space. However, kzalloc() does not guarantee the natural
alignment (alignment to the allocation's own size) that the architecture
requires of the pgd. This patch creates a kmem_cache for pgd allocations
with the correct alignment.
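
For reference, the slab allocator takes the minimum object alignment as
the third argument of kmem_cache_create(), so passing the object size
itself yields naturally aligned objects; kzalloc() only guarantees
ARCH_KMALLOC_MINALIGN. The prototype (as found in include/linux/slab.h
in kernels of this era) is:

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
				     size_t align, unsigned long flags,
				     void (*ctor)(void *));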

The current kernel configurations with 4K pages + 39-bit VA and 64K
pages + 42-bit VA use a full page for the pgd and are therefore not
affected. The patch is required for 48-bit VA with 64K pages, where the
pgd is only 512 bytes.
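
These sizes follow from PGD_SIZE = PTRS_PER_PGD * sizeof(pgd_t), with
PTRS_PER_PGD = 2^(VA_BITS - PGDIR_SHIFT). As a minimal sketch, the
userspace program below reproduces the arithmetic; the PGDIR_SHIFT
values (30, 29 and 42) correspond to the page-table layouts of the
three configurations and are part of this illustration, not of the
patch:

#include <stdio.h>

int main(void)
{
	/* VA_BITS / PGDIR_SHIFT / PAGE_SHIFT for the configurations above */
	const struct {
		const char *cfg;
		int va_bits, pgdir_shift, page_shift;
	} c[] = {
		{ "4K pages  + 39-bit VA", 39, 30, 12 },
		{ "64K pages + 42-bit VA", 42, 29, 16 },
		{ "64K pages + 48-bit VA", 48, 42, 16 },
	};

	for (int i = 0; i < 3; i++) {
		/* PTRS_PER_PGD = 2^(VA_BITS - PGDIR_SHIFT); sizeof(pgd_t) == 8 */
		unsigned long pgd_size = (1UL << (c[i].va_bits - c[i].pgdir_shift)) * 8;
		unsigned long page_size = 1UL << c[i].page_shift;

		printf("%s: PGD_SIZE = %5lu bytes (%s PAGE_SIZE)\n", c[i].cfg,
		       pgd_size, pgd_size == page_size ? "==" : "smaller than");
	}
	return 0;
}

This prints 4096, 65536 and 512 bytes respectively, matching the cases
described above.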
Reported-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 04f905a9
@@ -30,12 +30,14 @@
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
+static struct kmem_cache *pgd_cache;
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	if (PGD_SIZE == PAGE_SIZE)
 		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	else
-		return kzalloc(PGD_SIZE, GFP_KERNEL);
+		return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	if (PGD_SIZE == PAGE_SIZE)
 		free_page((unsigned long)pgd);
 	else
-		kfree(pgd);
+		kmem_cache_free(pgd_cache, pgd);
 }
+
+static int __init pgd_cache_init(void)
+{
+	/*
+	 * Naturally aligned pgds required by the architecture.
+	 */
+	if (PGD_SIZE != PAGE_SIZE)
+		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+					      SLAB_PANIC, NULL);
+	return 0;
+}
+core_initcall(pgd_cache_init);
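
To see the effect, a throwaway test module along the following lines
(hypothetical; the cache name, object size and module boilerplate are
illustrative, not part of the patch) can allocate from a cache created
the same way and check the natural alignment that kzalloc() does not
promise:

#include <linux/module.h>
#include <linux/slab.h>

/* 512 bytes: the pgd size for 48-bit VA with 64K pages */
#define OBJ_SIZE	512

static struct kmem_cache *test_cache;

static int __init align_test_init(void)
{
	void *p;

	/* size == align, as in pgd_cache_init() above */
	test_cache = kmem_cache_create("align_test", OBJ_SIZE, OBJ_SIZE,
				       SLAB_PANIC, NULL);
	p = kmem_cache_zalloc(test_cache, GFP_KERNEL);
	pr_info("object at %p naturally aligned: %d\n", p,
		IS_ALIGNED((unsigned long)p, OBJ_SIZE));
	kmem_cache_free(test_cache, p);
	return 0;
}

static void __exit align_test_exit(void)
{
	kmem_cache_destroy(test_cache);
}

module_init(align_test_init);
module_exit(align_test_exit);
MODULE_LICENSE("GPL");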