Commit 09e61a77 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm: Make __VIRTUAL_MASK_SHIFT dynamic

For boot-time switching between paging modes, we need to be able to
adjust virtual mask shifts.

The change doesn't affect the kernel image size much:

   text	   data	    bss	    dec	    hex	filename
8628892	4734340	1368064	14731296	 e0c820	vmlinux.before
8628966	4734340	1368064	14731370	 e0c86a	vmlinux.after
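
For illustration only (this is not part of the patch; the standalone function and its bool parameter are invented for the sketch), the canonicality check performed in the SYSCALL return path hunk below (GLOBAL(entry_SYSCALL_64_after_hwframe)) looks roughly like this in C: sign-extend the return address from bit 56 when 5-level paging is enabled, from bit 47 otherwise, and reject the address if it changed:

#include <stdbool.h>

/*
 * Sketch of the shl/sar sequence in the hunk below.
 * With 5-level paging the shift is 64 - 57 = 7, otherwise 64 - 48 = 16,
 * i.e. 64 - (__VIRTUAL_MASK_SHIFT + 1).
 */
static inline bool sysret_addr_is_canonical(unsigned long addr, bool l5_enabled)
{
	int shift = 64 - ((l5_enabled ? 56 : 47) + 1);

	/*
	 * If sign-extending from the top implemented bit changes the
	 * address, it was not canonical.
	 */
	return (unsigned long)((long)(addr << shift) >> shift) == addr;
}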
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214111656.88514-9-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 162434e7
@@ -274,8 +274,20 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 	 * Change top bits to match most significant bit (47th or 56th bit
 	 * depending on paging mode) in the address.
 	 */
+#ifdef CONFIG_X86_5LEVEL
+	testl	$1, pgtable_l5_enabled(%rip)
+	jz	1f
+	shl	$(64 - 57), %rcx
+	sar	$(64 - 57), %rcx
+	jmp	2f
+1:
+	shl	$(64 - 48), %rcx
+	sar	$(64 - 48), %rcx
+2:
+#else
 	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+#endif
 
 	/* If this changed %rcx, it was not canonical */
 	cmpq	%rcx, %r11
...
@@ -56,7 +56,7 @@
 #define __PHYSICAL_MASK_SHIFT	52
 
 #ifdef CONFIG_X86_5LEVEL
-#define __VIRTUAL_MASK_SHIFT	56
+#define __VIRTUAL_MASK_SHIFT	(pgtable_l5_enabled ? 56 : 47)
 #else
 #define __VIRTUAL_MASK_SHIFT	47
 #endif
...
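
A quick sketch of the practical effect of the change above (plain userspace C, not kernel code; pgtable_l5_enabled is modeled here as a bool parameter): masks built on top of __VIRTUAL_MASK_SHIFT, e.g. __VIRTUAL_MASK assuming its usual ((1UL << __VIRTUAL_MASK_SHIFT) - 1) definition, now track the paging mode selected at boot instead of being fixed by the config option alone:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative only: how a shift-derived mask varies with the boot-time mode. */
static unsigned long virtual_mask(bool pgtable_l5_enabled)
{
	unsigned int shift = pgtable_l5_enabled ? 56 : 47;

	return (1UL << shift) - 1;
}

int main(void)
{
	printf("4-level: %#lx\n", virtual_mask(false));	/* 0x7fffffffffff */
	printf("5-level: %#lx\n", virtual_mask(true));	/* 0xffffffffffffff */
	return 0;
}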
@@ -85,8 +85,12 @@ static struct addr_marker address_markers[] = {
 	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
 	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
 #ifdef CONFIG_KASAN
-	[KASAN_SHADOW_START_NR]	= { KASAN_SHADOW_START,	"KASAN shadow" },
-	[KASAN_SHADOW_END_NR]	= { KASAN_SHADOW_END,	"KASAN shadow end" },
+	/*
+	 * These fields get initialized with the (dynamic)
+	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
+	 */
+	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
+	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
 #endif
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	[LDT_NR]		= { 0UL,		"LDT remap" },
@@ -571,6 +575,10 @@ static int __init pt_dump_init(void)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
 #endif
+#ifdef CONFIG_KASAN
+	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+#endif
 #endif
 #ifdef CONFIG_X86_32
 	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
...
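
As for why the markers have to move into pt_dump_init() (my reading; the patch itself only notes that the values are dynamic): once __VIRTUAL_MASK_SHIFT can read pgtable_l5_enabled, addresses derived from it, such as KASAN_SHADOW_START/END, are no longer integer constant expressions, so they cannot appear in a static initializer. A minimal sketch of the problem and of the deferred-initialization pattern, with invented names:

extern int l5_enabled;				/* stands in for pgtable_l5_enabled */
#define SHIFT_SKETCH	(l5_enabled ? 56 : 47)	/* stands in for __VIRTUAL_MASK_SHIFT */

/* static unsigned long bad = 1UL << SHIFT_SKETCH;   <- "initializer element is not constant" */

static unsigned long good;			/* starts as 0UL, like the markers above */

static void init_sketch(void)			/* filled in at boot, like pt_dump_init() */
{
	good = 1UL << SHIFT_SKETCH;
}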
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
 	unsigned long *base;
 	unsigned long size_tb;
 } kaslr_regions[] = {
-	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
+	{ &page_offset_base, 0 },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
 	{ &vmemmap_base, 1 },
 };
@@ -93,6 +93,8 @@ void __init kernel_randomize_memory(void)
 	if (!kaslr_memory_enabled())
 		return;
 
+	kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+
 	/*
 	 * Update Physical memory mapping to available and
 	 * add padding if needed (especially for memory hotplug support).
...
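
Worked example of the value that is now computed at runtime in kernel_randomize_memory() (assuming TB_SHIFT is 40, i.e. 1 TB = 2^40 bytes): 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) = 1 << (52 - 40) = 4096 TB, the maximum size budgeted for the page_offset_base (direct mapping) region:

#include <stdio.h>

/* Illustrative only: the same arithmetic as the assignment above. */
int main(void)
{
	unsigned int physical_mask_shift = 52;	/* __PHYSICAL_MASK_SHIFT */
	unsigned int tb_shift = 40;		/* TB_SHIFT, assumed */

	printf("%u TB\n", 1U << (physical_mask_shift - tb_shift));	/* prints: 4096 TB */
	return 0;
}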