Commit 59b3d020 authored by Thomas Garnier, committed by Ingo Molnar

x86/mm: Update physical mapping variable names

Change the variable names in kernel_physical_mapping_init() and related
functions to correctly reflect physical and virtual memory addresses.
Also add comments on each function to describe usage and alignment
constraints.
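
The convention the rename adopts, illustrated with the existing __va()/__pa() direct-map helpers (a sketch for context only, not part of this patch; the variable names and the example address are arbitrary):

	/* paddr_* holds a physical address, vaddr_* holds the corresponding
	 * direct-mapping virtual address. */
	unsigned long paddr_start = 0x1000000;                        /* physical */
	unsigned long vaddr_start = (unsigned long)__va(paddr_start); /* virtual  */
	unsigned long paddr_back  = __pa(vaddr_start);                /* == paddr_start */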
Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Alexander Popov <alpopov@ptsecurity.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-3-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d899a7d1
@@ -328,22 +328,30 @@ void __init cleanup_highmap(void)
 	}
 }
 
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 	      pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	pte_t *pte;
 	int i;
-	pte_t *pte = pte_page + pte_index(addr);
 
-	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
-		next = (addr & PAGE_MASK) + PAGE_SIZE;
-		if (addr >= end) {
+	pte = pte_page + pte_index(paddr);
+	i = pte_index(paddr);
+
+	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pte(pte, __pte(0));
 			continue;
 		}
@@ -361,37 +369,44 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		}
 
 		if (0)
-			printk(" pte=%p addr=%lx pte=%016lx\n",
-				pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+			pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 		pages++;
-		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
-		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
 	}
 
 	update_page_count(PG_LEVEL_4K, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
-	int i = pmd_index(address);
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	int i = pmd_index(paddr);
 
-	for (; i < PTRS_PER_PMD; i++, address = next) {
-		pmd_t *pmd = pmd_page + pmd_index(address);
+	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+		pmd_t *pmd = pmd_page + pmd_index(paddr);
 		pte_t *pte;
 		pgprot_t new_prot = prot;
 
-		next = (address & PMD_MASK) + PMD_SIZE;
-		if (address >= end) {
+		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pmd(pmd, __pmd(0));
 			continue;
 		}
@@ -400,8 +415,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
 				pte = (pte_t *)pmd_page_vaddr(*pmd);
-				last_map_addr = phys_pte_init(pte, address,
-								end, prot);
+				paddr_last = phys_pte_init(pte, paddr,
+							   paddr_end, prot);
 				spin_unlock(&init_mm.page_table_lock);
 				continue;
 			}
@@ -420,7 +435,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (page_size_mask & (1 << PG_LEVEL_2M)) {
 				if (!after_bootmem)
 					pages++;
-				last_map_addr = next;
+				paddr_last = paddr_next;
 				continue;
 			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,42 +445,49 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
-				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pte = alloc_low_page();
-		last_map_addr = phys_pte_init(pte, address, end, new_prot);
+		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	      unsigned long page_size_mask)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
-	int i = pud_index(addr);
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	int i = pud_index(paddr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+		pud_t *pud = pud_page + pud_index(paddr);
 		pmd_t *pmd;
 		pgprot_t prot = PAGE_KERNEL;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (addr >= end) {
+		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pud(pud, __pud(0));
 			continue;
 		}
@@ -473,8 +495,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud)) {
 				pmd = pmd_offset(pud, 0);
-				last_map_addr = phys_pmd_init(pmd, addr, end,
-							 page_size_mask, prot);
+				paddr_last = phys_pmd_init(pmd, paddr,
+							   paddr_end,
+							   page_size_mask,
+							   prot);
 				__flush_tlb_all();
 				continue;
 			}
@@ -493,7 +517,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 				if (!after_bootmem)
 					pages++;
-				last_map_addr = next;
+				paddr_last = paddr_next;
 				continue;
 			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +527,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
-				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
 					PAGE_KERNEL_LARGE));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pmd = alloc_low_page();
-		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
-					      prot);
+		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+					   page_size_mask, prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, pmd);
@@ -522,38 +546,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 	update_page_count(PG_LEVEL_1G, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PUD level
+ * down. It returns the last physical address mapped.
+ */
 unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
-			     unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+			     unsigned long paddr_end,
 			     unsigned long page_size_mask)
 {
 	bool pgd_changed = false;
-	unsigned long next, last_map_addr = end;
-	unsigned long addr;
+	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 
-	start = (unsigned long)__va(start);
-	end = (unsigned long)__va(end);
-	addr = start;
+	paddr_last = paddr_end;
+	vaddr = (unsigned long)__va(paddr_start);
+	vaddr_end = (unsigned long)__va(paddr_end);
+	vaddr_start = vaddr;
 
-	for (; start < end; start = next) {
-		pgd_t *pgd = pgd_offset_k(start);
+	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+		pgd_t *pgd = pgd_offset_k(vaddr);
 		pud_t *pud;
 
-		next = (start & PGDIR_MASK) + PGDIR_SIZE;
+		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
 		if (pgd_val(*pgd)) {
 			pud = (pud_t *)pgd_page_vaddr(*pgd);
-			last_map_addr = phys_pud_init(pud, __pa(start),
-						 __pa(end), page_size_mask);
+			paddr_last = phys_pud_init(pud, __pa(vaddr),
+						   __pa(vaddr_end),
+						   page_size_mask);
 			continue;
 		}
 
 		pud = alloc_low_page();
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
-					      page_size_mask);
+		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+					   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
 		pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +592,11 @@ kernel_physical_mapping_init(unsigned long start,
 	}
 
 	if (pgd_changed)
-		sync_global_pgds(addr, end - 1, 0);
+		sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
 
 	__flush_tlb_all();
 
-	return last_map_addr;
+	return paddr_last;
 }
 
 #ifndef CONFIG_NUMA
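
As an aside (not part of the commit): the per-level stepping seen above, paddr_next = (paddr & PMD_MASK) + PMD_SIZE and its PTE/PUD counterparts, is plain round-down-then-advance arithmetic. A small stand-alone sketch, with the usual x86-64 sizes written out here as assumptions rather than taken from kernel headers:

	#include <stdio.h>

	/* Assumed x86-64 page sizes, for illustration only. */
	#define PAGE_SIZE (1UL << 12)   /* 4 KB */
	#define PMD_SIZE  (1UL << 21)   /* 2 MB */
	#define PUD_SIZE  (1UL << 30)   /* 1 GB */
	#define PAGE_MASK (~(PAGE_SIZE - 1))
	#define PMD_MASK  (~(PMD_SIZE - 1))
	#define PUD_MASK  (~(PUD_SIZE - 1))

	int main(void)
	{
		unsigned long paddr = 0x40300123;  /* arbitrary, unaligned physical address */

		/* Same computation as paddr_next in phys_pte_init/phys_pmd_init/phys_pud_init:
		 * round down to the level's boundary, then advance by one unit of that level. */
		printf("PTE step: %#lx\n", (paddr & PAGE_MASK) + PAGE_SIZE); /* 0x40301000 */
		printf("PMD step: %#lx\n", (paddr & PMD_MASK) + PMD_SIZE);   /* 0x40400000 */
		printf("PUD step: %#lx\n", (paddr & PUD_MASK) + PUD_SIZE);   /* 0x80000000 */
		return 0;
	}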