Commit 02772fb9 authored by Joerg Roedel, committed by Borislav Petkov

x86/sev-es: Allocate and map an IST stack for #VC handler

Allocate and map an IST stack and an additional fall-back stack for
the #VC handler.  The memory for the stacks is allocated only when
SEV-ES is active.

The #VC handler needs to use an IST stack because a #VC exception can be
raised from kernel space while an unsafe stack is active, e.g. in the
SYSCALL entry path.

Since #VC exceptions can nest, the #VC handler switches back to the
interrupted stack when it was entered from kernel space. If switching back
is not possible, the fall-back stack is used (see the sketch below).
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-43-joro@8bytes.org
parent 885689e4
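
Note that this commit only allocates and maps the stacks; the switching
logic itself lives in the #VC entry code, added separately in this series.
As a rough, hypothetical C sketch of the policy described above (not part
of this commit; is_safe_kernel_stack() is an assumed helper, not a kernel
API):

/*
 * Hypothetical sketch of the stack-selection policy from the commit
 * message. Not part of this commit.
 */
static unsigned long vc_select_stack(struct pt_regs *regs)
{
	/*
	 * Entered from kernel space on a known-safe stack: switch back
	 * to the interrupted stack so that a nested #VC cannot clobber
	 * the IST stack this handler was entered on.
	 */
	if (is_safe_kernel_stack(regs->sp))
		return regs->sp;

	/*
	 * Unsafe stack, e.g. the SYSCALL entry path where RSP still
	 * comes from user space: use the fall-back stack (VC2) instead.
	 */
	return __this_cpu_ist_top_va(VC2);
}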
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -11,25 +11,29 @@
 #ifdef CONFIG_X86_64
 
 /* Macro to enforce the same ordering and stack sizes */
-#define ESTACKS_MEMBERS(guardsize)				\
+#define ESTACKS_MEMBERS(guardsize, optional_stack_size)		\
 	char	DF_stack_guard[guardsize];			\
 	char	DF_stack[EXCEPTION_STKSZ];			\
 	char	NMI_stack_guard[guardsize];			\
 	char	NMI_stack[EXCEPTION_STKSZ];			\
 	char	DB_stack_guard[guardsize];			\
 	char	DB_stack[EXCEPTION_STKSZ];			\
 	char	MCE_stack_guard[guardsize];			\
 	char	MCE_stack[EXCEPTION_STKSZ];			\
+	char	VC_stack_guard[guardsize];			\
+	char	VC_stack[optional_stack_size];			\
+	char	VC2_stack_guard[guardsize];			\
+	char	VC2_stack[optional_stack_size];			\
 	char	IST_top_guard[guardsize];			\
 
 /* The exception stacks' physical storage. No guard pages required */
 struct exception_stacks {
-	ESTACKS_MEMBERS(0)
+	ESTACKS_MEMBERS(0, 0)
 };
 
 /* The effective cpu entry area mapping with guard pages. */
 struct cea_exception_stacks {
-	ESTACKS_MEMBERS(PAGE_SIZE)
+	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
 };
 
 /*
@@ -40,6 +44,8 @@ enum exception_stack_ordering {
 	ESTACK_NMI,
 	ESTACK_DB,
 	ESTACK_MCE,
+	ESTACK_VC,
+	ESTACK_VC2,
 	N_EXCEPTION_STACKS
 };
@@ -139,4 +145,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
 #define __this_cpu_ist_top_va(name)				\
 	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
 
+#define __this_cpu_ist_bottom_va(name)				\
+	CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
+
 #endif
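
The new __this_cpu_ist_bottom_va() pairs with the existing
__this_cpu_ist_top_va() to bracket a stack's mapped range. A hypothetical
usage sketch (not part of this commit): a predicate testing whether an
address lies on the current CPU's #VC IST stack could look like this:

/* Hypothetical sketch: does @sp point into this CPU's #VC IST stack? */
static bool on_vc_stack(unsigned long sp)
{
	return sp >= __this_cpu_ist_bottom_va(VC) &&
	       sp <  __this_cpu_ist_top_va(VC);
}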
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -28,6 +28,7 @@
 #define IST_INDEX_NMI		1
 #define IST_INDEX_DB		2
 #define IST_INDEX_MCE		3
+#define IST_INDEX_VC		4
 
 /*
  * Set __PAGE_OFFSET to the most negative possible address +
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1829,6 +1829,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
 	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
 	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
 	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
+	/* Only mapped when SEV-ES is active */
+	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
 }
 
 #else /* CONFIG_X86_64 */
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -24,11 +24,13 @@ static const char * const exception_stack_names[] = {
 	[ ESTACK_NMI	]	= "NMI",
 	[ ESTACK_DB	]	= "#DB",
 	[ ESTACK_MCE	]	= "#MC",
+	[ ESTACK_VC	]	= "#VC",
+	[ ESTACK_VC2	]	= "#VC2",
 };
 
 const char *stack_type_name(enum stack_type type)
 {
-	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
 	if (type == STACK_TYPE_IRQ)
 		return "IRQ";
@@ -79,6 +81,8 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
 	EPAGERANGE(NMI),
 	EPAGERANGE(DB),
 	EPAGERANGE(MCE),
+	EPAGERANGE(VC),
+	EPAGERANGE(VC2),
 };
 
 static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
@@ -88,7 +92,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 	struct pt_regs *regs;
 	unsigned int k;
 
-	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
 	begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
 
 	/*
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/sev-es.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/internal.h>
@@ -37,10 +38,41 @@ static struct ghcb __initdata *boot_ghcb;
 /* #VC handler runtime per-CPU data */
 struct sev_es_runtime_data {
 	struct ghcb ghcb_page;
+
+	/* Physical storage for the per-CPU IST stack of the #VC handler */
+	char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
+
+	/*
+	 * Physical storage for the per-CPU fall-back stack of the #VC handler.
+	 * The fall-back stack is used when it is not safe to switch back to
+	 * the interrupted stack in the #VC entry code.
+	 */
+	char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
 };
 
 static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 
+static void __init setup_vc_stacks(int cpu)
+{
+	struct sev_es_runtime_data *data;
+	struct cpu_entry_area *cea;
+	unsigned long vaddr;
+	phys_addr_t pa;
+
+	data = per_cpu(runtime_data, cpu);
+	cea  = get_cpu_entry_area(cpu);
+
+	/* Map #VC IST stack */
+	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
+	pa    = __pa(data->ist_stack);
+	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+
+	/* Map VC fall-back stack */
+	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
+	pa    = __pa(data->fallback_stack);
+	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+}
+
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -249,6 +281,7 @@ void __init sev_es_init_vc_handling(void)
 	for_each_possible_cpu(cpu) {
 		alloc_runtime_data(cpu);
 		init_ghcb(cpu);
+		setup_vc_stacks(cpu);
 	}
 }
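
For illustration, the mapping established by setup_vc_stacks() has to line
up with the IST entry programmed in tss_setup_ist(): CEA_ESTACK_BOT() and
CEA_ESTACK_TOP() bracket one EXCEPTION_STKSZ-sized window per stack. A
hypothetical sanity check (not part of this commit) could assert that:

/*
 * Hypothetical sanity check, not part of this commit: the CEA window
 * mapped by setup_vc_stacks() must be exactly one stack in size, with
 * its top being what tss_setup_ist() programs into IST_INDEX_VC.
 */
static void __init check_vc_stack_layout(int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned long bot = CEA_ESTACK_BOT(&cea->estacks, VC);
	unsigned long top = CEA_ESTACK_TOP(&cea->estacks, VC);

	BUG_ON(top - bot != EXCEPTION_STKSZ);
}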