Commit 1bdb67e5 authored by Thomas Gleixner, committed by Borislav Petkov

x86/exceptions: Enable IST guard pages

All usage sites which expected that the exception stacks in the CPU entry
area are mapped linearly are fixed up. Enable guard pages between the
IST stacks.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160145.349862042@linutronix.de
parent 32074269
@@ -26,13 +26,9 @@ struct exception_stacks {
 	ESTACKS_MEMBERS(0)
 };
 
-/*
- * The effective cpu entry area mapping with guard pages. Guard size is
- * zero until the code which makes assumptions about linear mappings is
- * cleaned up.
- */
+/* The effective cpu entry area mapping with guard pages. */
 struct cea_exception_stacks {
-	ESTACKS_MEMBERS(0)
+	ESTACKS_MEMBERS(PAGE_SIZE)
 };
 
 /*
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.