Commit d2b2931f authored by Tom Lendacky, committed by Borislav Petkov (AMD)

x86/sev: Use the SVSM to create a vCPU when not in VMPL0

Using the RMPADJUST instruction, the VMSA attribute can only be changed
at VMPL0. An SVSM will be present when running at VMPL1 or a lower
privilege level.

In that case, use the SVSM_CORE_CREATE_VCPU call or the
SVSM_CORE_DELETE_VCPU call to perform VMSA attribute changes. Use the
VMPL level supplied by the SVSM for the VMSA when starting the AP.

  [ bp: Fix typo + touchups. ]
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/bcdd95ecabe9723673b9693c7f1533a2b8f17781.1717600736.git.thomas.lendacky@amd.com
parent fcd042e8
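
Before reading the hunks, here is a minimal sketch (not the patch itself) of the decision
the commit message describes: at VMPL0 the VMSA attribute is flipped directly with
RMPADJUST, otherwise the request is marshalled into an SVSM core-protocol call. The symbol
names (snp_vmpl, svsm_caa, svsm_perform_call_protocol(), SVSM_CORE_CALL()) are the ones the
patch uses; the wrapper name set_vmsa_bit() is only for illustration, and the real code also
disables interrupts around the SVSM call.

	/* Illustrative condensation of the new snp_set_vmsa() added below. */
	static int set_vmsa_bit(void *va, void *caa, int apic_id, bool make_vmsa)
	{
		if (!snp_vmpl) {
			/* VMPL0: adjust the VMPL1 permissions of the page via RMPADJUST. */
			u64 attrs = 1 | (make_vmsa ? RMPADJUST_VMSA_PAGE_BIT : 0);

			return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
		}

		/* Not VMPL0: ask the SVSM to create or delete the vCPU for us. */
		struct svsm_call call = {};

		call.caa = this_cpu_read(svsm_caa);
		call.rcx = __pa(va);				/* VMSA page */
		if (make_vmsa) {
			call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
			call.rdx = __pa(caa);			/* target vCPU's CAA */
			call.r8  = apic_id;
		} else {
			call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
		}

		return svsm_perform_call_protocol(&call);
	}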
@@ -233,6 +233,8 @@ struct svsm_call {
 #define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
 #define SVSM_CORE_REMAP_CA		0
 #define SVSM_CORE_PVALIDATE		1
+#define SVSM_CORE_CREATE_VCPU		2
+#define SVSM_CORE_DELETE_VCPU		3
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern void __sev_es_ist_enter(struct pt_regs *regs);
...
@@ -1010,22 +1010,49 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
-static int snp_set_vmsa(void *va, bool vmsa)
+static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
 {
-	u64 attrs;
+	int ret;
 
-	/*
-	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
-	 * using the RMPADJUST instruction. However, for the instruction to
-	 * succeed it must target the permissions of a lesser privileged
-	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
-	 * instruction in the AMD64 APM Volume 3).
-	 */
-	attrs = 1;
-	if (vmsa)
-		attrs |= RMPADJUST_VMSA_PAGE_BIT;
+	if (snp_vmpl) {
+		struct svsm_call call = {};
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		call.caa = this_cpu_read(svsm_caa);
+		call.rcx = __pa(va);
+
+		if (make_vmsa) {
+			/* Protocol 0, Call ID 2 */
+			call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
+			call.rdx = __pa(caa);
+			call.r8  = apic_id;
+		} else {
+			/* Protocol 0, Call ID 3 */
+			call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
+		}
+
+		ret = svsm_perform_call_protocol(&call);
+
+		local_irq_restore(flags);
+	} else {
+		/*
+		 * If the kernel runs at VMPL0, it can change the VMSA
+		 * bit for a page using the RMPADJUST instruction.
+		 * However, for the instruction to succeed it must
+		 * target the permissions of a lesser privileged (higher
+		 * numbered) VMPL level, so use VMPL1.
+		 */
+		u64 attrs = 1;
+
+		if (make_vmsa)
+			attrs |= RMPADJUST_VMSA_PAGE_BIT;
 
-	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+		ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+	}
+
+	return ret;
 }
 
 #define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
@@ -1059,11 +1086,11 @@ static void *snp_alloc_vmsa_page(int cpu)
 	return page_address(p + 1);
 }
 
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
 {
 	int err;
 
-	err = snp_set_vmsa(vmsa, false);
+	err = snp_set_vmsa(vmsa, NULL, apic_id, false);
 	if (err)
 		pr_err("clear VMSA page failed (%u), leaking page\n", err);
 	else
@@ -1074,6 +1101,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 {
 	struct sev_es_save_area *cur_vmsa, *vmsa;
 	struct ghcb_state state;
+	struct svsm_ca *caa;
 	unsigned long flags;
 	struct ghcb *ghcb;
 	u8 sipi_vector;
...@@ -1120,6 +1148,9 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip) ...@@ -1120,6 +1148,9 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
if (!vmsa) if (!vmsa)
return -ENOMEM; return -ENOMEM;
/* If an SVSM is present, the SVSM per-CPU CAA will be !NULL */
caa = per_cpu(svsm_caa, cpu);
/* CR4 should maintain the MCE value */ /* CR4 should maintain the MCE value */
cr4 = native_read_cr4() & X86_CR4_MCE; cr4 = native_read_cr4() & X86_CR4_MCE;
@@ -1167,11 +1198,11 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 	 *   VMPL level
 	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
 	 */
-	vmsa->vmpl		= 0;
+	vmsa->vmpl		= snp_vmpl;
 	vmsa->sev_features	= sev_status >> 2;
 
 	/* Switch the page over to a VMSA page now that it is initialized */
-	ret = snp_set_vmsa(vmsa, true);
+	ret = snp_set_vmsa(vmsa, caa, apic_id, true);
 	if (ret) {
 		pr_err("set VMSA page failed (%u)\n", ret);
 		free_page((unsigned long)vmsa);
@@ -1187,7 +1218,10 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 	vc_ghcb_invalidate(ghcb);
 	ghcb_set_rax(ghcb, vmsa->sev_features);
 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
-	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
+	ghcb_set_sw_exit_info_1(ghcb,
+				((u64)apic_id << 32) |
+				((u64)snp_vmpl << 16) |
+				SVM_VMGEXIT_AP_CREATE);
 	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
 
 	sev_es_wr_ghcb_msr(__pa(ghcb));
@@ -1205,13 +1239,13 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 
 	/* Perform cleanup if there was an error */
 	if (ret) {
-		snp_cleanup_vmsa(vmsa);
+		snp_cleanup_vmsa(vmsa, apic_id);
 		vmsa = NULL;
 	}
 
 	/* Free up any previous VMSA page */
 	if (cur_vmsa)
-		snp_cleanup_vmsa(cur_vmsa);
+		snp_cleanup_vmsa(cur_vmsa, apic_id);
 
 	/* Record the current VMSA page */
 	per_cpu(sev_vmsa, cpu) = vmsa;
...