Commit 1070730c authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm/encrypt: Simplify sme_pgtable_calc()

sme_pgtable_calc() is unnecessarily complex. It can be rewritten in a
more streamlined way.

As a side effect, the code becomes ready for boot-time switching
between paging modes.
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180131135404.40692-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent aad98391
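The simplification rests on two observations: for the power-of-two sizes used here, ALIGN(len, size) / size and DIV_ROUND_UP(len, size) are both ceiling division, and with 4-level paging PTRS_PER_P4D is 1 while PGDIR_SIZE equals P4D_SIZE, so the p4d term drops out and the pud term takes over the PGDIR_SIZE granularity. Below is a minimal user-space sketch of the first observation; the macros are illustrative stand-ins, not the kernel definitions.

/* Stand-in macros for illustration only; not the kernel definitions. */
#include <assert.h>
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long pud_size = 1UL << 30;	/* assumed PUD_SIZE: 1 GiB */
	unsigned long pgd_size = 1UL << 39;	/* assumed PGDIR_SIZE (4-level): 512 GiB */
	unsigned long lens[] = { 0, 1, (1UL << 30) - 1, 1UL << 30, 3UL << 39 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		/* ALIGN()/size and DIV_ROUND_UP() are both ceiling division. */
		assert(ALIGN(lens[i], pud_size) / pud_size == DIV_ROUND_UP(lens[i], pud_size));
		assert(ALIGN(lens[i], pgd_size) / pgd_size == DIV_ROUND_UP(lens[i], pgd_size));
	}
	printf("ceiling-division identity holds for all sample lengths\n");
	return 0;
}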
@@ -231,8 +231,7 @@ static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size, pte_size;
-	unsigned long total;
+	unsigned long entries = 0, tables = 0;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
@@ -246,42 +245,25 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	 * Incrementing the count for each covers the case where the addresses
 	 * cross entries.
 	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-	total = p4d_size + pud_size + pmd_size + pte_size;
+	/* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
+	if (PTRS_PER_P4D > 1)
+		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
+	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
+	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
+	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
 	 * the new pagetables.
 	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
 
-	total += p4d_size + pud_size + pmd_size;
+	if (PTRS_PER_P4D > 1)
+		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
+	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
+	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
 
-	return total;
+	return entries + tables;
 }
 
 void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
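As a sanity check of the whole transformation, the sketch below (plain user-space C, not kernel code; the constants are assumed x86-64 4-level values and ENTRY_SIZE stands in for sizeof(p4d_t)/sizeof(pud_t)/sizeof(pmd_t)/sizeof(pte_t)) computes both the old ALIGN()-based formula and the new DIV_ROUND_UP()-based one and asserts that they return the same number of bytes:

/*
 * Sanity sketch (not kernel code): compare the old ALIGN()-based
 * sme_pgtable_calc() with the new DIV_ROUND_UP()-based one under
 * assumed 4-level x86-64 constants.
 */
#include <assert.h>
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define ENTRY_SIZE	8UL		/* assumed sizeof(p4d_t/pud_t/pmd_t/pte_t) */
#define PTRS_PER_PTE	512UL
#define PTRS_PER_PMD	512UL
#define PTRS_PER_PUD	512UL
#define PTRS_PER_P4D	1UL		/* 4-level paging */
#define PUD_SIZE	(1UL << 30)
#define P4D_SIZE	(1UL << 39)	/* == PGDIR_SIZE on 4-level */
#define PGDIR_SIZE	(1UL << 39)

static unsigned long calc_old(unsigned long len)
{
	unsigned long p4d_size = 0, pud_size, pmd_size, pte_size, total;

	/* 4-level branch of the old code */
	pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE + 1) * ENTRY_SIZE * PTRS_PER_PUD;
	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE + 1) * ENTRY_SIZE * PTRS_PER_PMD;
	pte_size = 2 * ENTRY_SIZE * PTRS_PER_PTE;
	total = p4d_size + pud_size + pmd_size + pte_size;

	pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE * ENTRY_SIZE * PTRS_PER_PUD;
	pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE * ENTRY_SIZE * PTRS_PER_PMD;
	return total + pud_size + pmd_size;
}

static unsigned long calc_new(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * ENTRY_SIZE * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * ENTRY_SIZE * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * ENTRY_SIZE * PTRS_PER_PMD;
	entries += 2 * ENTRY_SIZE * PTRS_PER_PTE;

	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * ENTRY_SIZE * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * ENTRY_SIZE * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * ENTRY_SIZE * PTRS_PER_PMD;

	return entries + tables;
}

int main(void)
{
	unsigned long lens[] = { 1UL << 20, 1UL << 30, (1UL << 30) + 1, 16UL << 30 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		assert(calc_old(lens[i]) == calc_new(lens[i]));
		printf("len=%lu -> %lu bytes of pagetables\n", lens[i], calc_new(lens[i]));
	}
	return 0;
}

Under 5-level stand-in constants (PTRS_PER_P4D = 512, PGDIR_SIZE = 1UL << 48) the same assertion is expected to hold, since each DIV_ROUND_UP() term mirrors one ALIGN()-based term of the old code, consistent with the patch being a pure simplification with no functional change.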