Commit ec1c66af authored by Michael Roth, committed by Borislav Petkov

x86/compressed/64: Detect/setup SEV/SME features earlier during boot

With upcoming SEV-SNP support, SEV-related features need to be
initialized earlier during boot, at the same point the initial #VC
handler is set up, so that the SEV-SNP CPUID table can be utilized
during the initial feature checks. Also, SEV-SNP feature detection
will rely on EFI helper functions to scan the EFI config table for the
Confidential Computing blob, and so would need to be implemented at
least partially in C.

Currently set_sev_encryption_mask() is used to initialize the
sev_status and sme_me_mask globals that advertise what SEV/SME features
are available in a guest. Rename it to sev_enable() to better reflect
that (SME is only enabled in the case of SEV guests in the
boot/compressed kernel), and move it to just after the stage1 #VC
handler is set up so that it can be used to initialize SEV-SNP as well
in future patches.

While at it, re-implement it as C code so that all SEV feature
detection can be better consolidated with upcoming SEV-SNP feature
detection, which will also be in C.

The 32-bit entry path remains unchanged, as it never relied on the
set_sev_encryption_mask() initialization to begin with.

  [ bp: Massage commit message. ]
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20220307213356.2797205-8-brijesh.singh@amd.com
parent 950d0055
...@@ -189,11 +189,11 @@ SYM_FUNC_START(startup_32) ...@@ -189,11 +189,11 @@ SYM_FUNC_START(startup_32)
subl $32, %eax /* Encryption bit is always above bit 31 */ subl $32, %eax /* Encryption bit is always above bit 31 */
bts %eax, %edx /* Set encryption mask for page tables */ bts %eax, %edx /* Set encryption mask for page tables */
/* /*
* Mark SEV as active in sev_status so that startup32_check_sev_cbit() * Set MSR_AMD64_SEV_ENABLED_BIT in sev_status so that
* will do a check. The sev_status memory will be fully initialized * startup32_check_sev_cbit() will do a check. sev_enable() will
* with the contents of MSR_AMD_SEV_STATUS later in * initialize sev_status with all the bits reported by
* set_sev_encryption_mask(). For now it is sufficient to know that SEV * MSR_AMD_SEV_STATUS later, but only MSR_AMD64_SEV_ENABLED_BIT
* is active. * needs to be set for now.
*/ */
movl $1, rva(sev_status)(%ebp) movl $1, rva(sev_status)(%ebp)
1: 1:
...@@ -447,6 +447,23 @@ SYM_CODE_START(startup_64) ...@@ -447,6 +447,23 @@ SYM_CODE_START(startup_64)
call load_stage1_idt call load_stage1_idt
popq %rsi popq %rsi
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* Now that the stage1 interrupt handlers are set up, #VC exceptions from
* CPUID instructions can be properly handled for SEV-ES guests.
*
* For SEV-SNP, the CPUID table also needs to be set up in advance of any
* CPUID instructions being issued, so go ahead and do that now via
* sev_enable(), which will also handle the rest of the SEV-related
* detection/setup to ensure that has been done in advance of any dependent
* code.
*/
pushq %rsi
movq %rsi, %rdi /* real mode address */
call sev_enable
popq %rsi
#endif
/* /*
* paging_prepare() sets up the trampoline and checks if we need to * paging_prepare() sets up the trampoline and checks if we need to
* enable 5-level paging. * enable 5-level paging.
...@@ -558,17 +575,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) ...@@ -558,17 +575,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
shrq $3, %rcx shrq $3, %rcx
rep stosq rep stosq
/*
* If running as an SEV guest, the encryption mask is required in the
* page-table setup code below. When the guest also has SEV-ES enabled
* set_sev_encryption_mask() will cause #VC exceptions, but the stage2
* handler can't map its GHCB because the page-table is not set up yet.
* So set up the encryption mask here while still on the stage1 #VC
* handler. Then load stage2 IDT and switch to the kernel's own
* page-table.
*/
pushq %rsi pushq %rsi
call set_sev_encryption_mask
call load_stage2_idt call load_stage2_idt
/* Pass boot_params to initialize_identity_maps() */ /* Pass boot_params to initialize_identity_maps() */
......
...@@ -187,42 +187,6 @@ SYM_CODE_END(startup32_vc_handler) ...@@ -187,42 +187,6 @@ SYM_CODE_END(startup32_vc_handler)
.code64 .code64
#include "../../kernel/sev_verify_cbit.S" #include "../../kernel/sev_verify_cbit.S"
/*
 * void set_sev_encryption_mask(void)
 *
 * Legacy SEV/SME init helper (removed by this commit in favor of the C
 * sev_enable()): if an encryption bit is reported, set that bit in
 * sme_me_mask and cache the raw MSR_AMD64_SEV value in sev_status.
 *
 * Out:    rax = 0 always (also when CONFIG_AMD_MEM_ENCRYPT is off and
 *         the body is compiled out).
 * Clobb:  rcx (rdmsr MSR index), flags; rdx is saved/restored.
 *         NOTE(review): get_sev_encryption_bit() may clobber more —
 *         not visible here, confirm against its definition.
 */
SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
push %rbp
push %rdx
movq %rsp, %rbp /* Save current stack pointer */
call get_sev_encryption_bit /* Get the encryption bit position */
testl %eax, %eax
jz .Lno_sev_mask
/* eax = C-bit position; record it in the global mask */
bts %rax, sme_me_mask(%rip) /* Create the encryption mask */
/*
 * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
 * get_sev_encryption_bit() because this function is 32-bit code and
 * shared between 64-bit and 32-bit boot path.
 */
movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */
rdmsr
/* Store MSR value in sev_status (rdmsr returns edx:eax) */
shlq $32, %rdx
orq %rdx, %rax
movq %rax, sev_status(%rip)
.Lno_sev_mask:
movq %rbp, %rsp /* Restore original stack pointer */
pop %rdx
pop %rbp
#endif
xor %rax, %rax
RET
SYM_FUNC_END(set_sev_encryption_mask)
.data .data
......
...@@ -120,12 +120,12 @@ static inline void console_init(void) ...@@ -120,12 +120,12 @@ static inline void console_init(void)
{ } { }
#endif #endif
void set_sev_encryption_mask(void);
#ifdef CONFIG_AMD_MEM_ENCRYPT #ifdef CONFIG_AMD_MEM_ENCRYPT
void sev_enable(struct boot_params *bp);
void sev_es_shutdown_ghcb(void); void sev_es_shutdown_ghcb(void);
extern bool sev_es_check_ghcb_fault(unsigned long address); extern bool sev_es_check_ghcb_fault(unsigned long address);
#else #else
static inline void sev_enable(struct boot_params *bp) { }
static inline void sev_es_shutdown_ghcb(void) { } static inline void sev_es_shutdown_ghcb(void) { }
static inline bool sev_es_check_ghcb_fault(unsigned long address) static inline bool sev_es_check_ghcb_fault(unsigned long address)
{ {
......
...@@ -201,3 +201,39 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code) ...@@ -201,3 +201,39 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
else if (result != ES_RETRY) else if (result != ES_RETRY)
sev_es_terminate(GHCB_SEV_ES_GEN_REQ); sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
} }
/*
 * sev_enable() - detect SEV support via CPUID/MSR and initialize the
 * sev_status and sme_me_mask globals.
 *
 * @bp: real-mode boot_params pointer. Unused in this body; presumably
 *      passed in for the upcoming SEV-SNP config-table scanning — TODO
 *      confirm against later patches in the series.
 *
 * Returns early (leaving both globals untouched) when the SEV leaf is
 * absent, SEV is not supported, or the guest is not SEV-enabled.
 *
 * NOTE(review): CPUID raises #VC in SEV-ES guests, so this must run
 * only after a #VC handler is installed — the caller arranges that.
 */
void sev_enable(struct boot_params *bp)
{
unsigned int eax, ebx, ecx, edx;
struct msr m;	/* raw MSR_AMD64_SEV contents, filled by boot_rdmsr() */
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return;
/*
 * Check for the SME/SEV feature:
 * CPUID Fn8000_001F[EAX]
 * - Bit 0 - Secure Memory Encryption support
 * - Bit 1 - Secure Encrypted Virtualization support
 * CPUID Fn8000_001F[EBX]
 * - Bits 5:0 - Pagetable bit position used to indicate encryption
 */
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1)))
return;
/* Set the SME mask if this is an SEV guest. */
boot_rdmsr(MSR_AMD64_SEV, &m);
sev_status = m.q;
if (!(sev_status & MSR_AMD64_SEV_ENABLED))
return;
/* Encryption (C-bit) position comes from Fn8000_001F[EBX] bits 5:0 */
sme_me_mask = BIT_ULL(ebx & 0x3f);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment