Commit 86137773 authored by Tom Lendacky, committed by Paolo Bonzini

KVM: SVM: Provide support for SEV-ES vCPU loading

An SEV-ES vCPU places additional requirements on VMCB vCPU load/put. SEV-ES
hardware will restore certain registers on VMEXIT, but does not save them on
VMRUN (see Table B-3 and Table B-4 of the AMD64 APM Volume 2), so make the
following changes:

General vCPU load changes:
  - During vCPU loading, perform a VMSAVE to the per-CPU SVM save area and
    also save the current values of XCR0, XSS and PKRU there, as these
    registers will be restored on VMEXIT.

General vCPU put changes:
  - Do not attempt to restore registers that SEV-ES hardware has already
    restored on VMEXIT.
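
The two lists above boil down to one policy: save host state in software at
vCPU load time only when SEV-ES hardware will not restore it on VMEXIT, and
skip the software restore at vCPU put time for anything hardware already put
back. As a rough illustration only (not kernel code and not part of this
patch), here is a small user-space model of that policy; the register names
and values are stand-ins:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mock table mirroring the shape of host_save_user_msrs in the diff below:
 * each entry records whether SEV-ES hardware restores it on VMEXIT. */
struct mock_host_msr {
        const char *name;          /* stand-in for the MSR index */
        bool sev_es_restored;      /* true: hardware restores it on VMEXIT */
        uint64_t saved;            /* value captured at vCPU load */
};

static struct mock_host_msr msrs[] = {
        { "STAR",    true,  0 },   /* restored by hardware: no software save */
        { "TSC_AUX", false, 0 },   /* not restored: save/restore in software */
};

static uint64_t mock_read(const char *name)  { (void)name; return 0x1234; }
static void mock_write(const char *name, uint64_t v)
{
        printf("software-restore %s = 0x%llx\n", name, (unsigned long long)v);
}

static void mock_vcpu_load(void)
{
        /* The real code additionally issues VMSAVE and stashes XCR0/XSS/PKRU. */
        for (size_t i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++) {
                if (msrs[i].sev_es_restored)
                        continue;          /* VMEXIT will bring it back */
                msrs[i].saved = mock_read(msrs[i].name);
        }
}

static void mock_vcpu_put(void)
{
        for (size_t i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++) {
                if (msrs[i].sev_es_restored)
                        continue;          /* already restored by hardware */
                mock_write(msrs[i].name, msrs[i].saved);
        }
}

int main(void)
{
        mock_vcpu_load();
        mock_vcpu_put();                   /* only TSC_AUX is touched in software */
        return 0;
}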
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <019390e9cb5e93cd73014fa5a040c17d42588733.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 376c6d28
@@ -234,7 +234,8 @@ struct vmcb_save_area {
         u8 cpl;
         u8 reserved_2[4];
         u64 efer;
-        u8 reserved_3[112];
+        u8 reserved_3[104];
+        u64 xss;                /* Valid for SEV-ES only */
         u64 cr4;
         u64 cr3;
         u64 cr0;
@@ -265,9 +266,12 @@ struct vmcb_save_area {

         /*
          * The following part of the save area is valid only for
-         * SEV-ES guests when referenced through the GHCB.
+         * SEV-ES guests when referenced through the GHCB or for
+         * saving to the host save area.
          */
-        u8 reserved_7[104];
+        u8 reserved_7[80];
+        u32 pkru;
+        u8 reserved_7a[20];
         u64 reserved_8;         /* rax already available at 0x01f8 */
         u64 rcx;
         u64 rdx;
...
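For reference, the reserved-field resizing in the hunks above is pure
bookkeeping for the new fields; a quick compile-time sanity check of that
arithmetic (a sketch, assuming the usual 8-byte u64 and 4-byte u32):

#include <stdint.h>

/* reserved_3 shrinks from 112 to 104 bytes to make room for the new u64 xss. */
_Static_assert(112 == 104 + sizeof(uint64_t), "xss accounts for the 8 bytes");

/* reserved_7 shrinks from 104 to 80 bytes, with u32 pkru and reserved_7a[20]
 * covering the remainder. */
_Static_assert(104 == 80 + sizeof(uint32_t) + 20, "pkru + reserved_7a cover 24 bytes");

int main(void) { return 0; }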
@@ -16,12 +16,15 @@
 #include <linux/swap.h>
 #include <linux/processor.h>
 #include <linux/trace_events.h>
+#include <asm/fpu/internal.h>

 #include "x86.h"
 #include "svm.h"
 #include "cpuid.h"
 #include "trace.h"

+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
 static u8 sev_enc_bit;
 static int sev_flush_asids(void);
 static DECLARE_RWSEM(sev_deactivate_lock);
@@ -1852,3 +1855,54 @@ void sev_es_create_vcpu(struct vcpu_svm *svm)
                                             GHCB_VERSION_MIN,
                                             sev_enc_bit));
 }
+
+void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
+{
+        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+        struct vmcb_save_area *hostsa;
+        unsigned int i;
+
+        /*
+         * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
+         * of which one step is to perform a VMLOAD. Since hardware does not
+         * perform a VMSAVE on VMRUN, the host save area must be updated.
+         */
+        asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+
+        /*
+         * Certain MSRs are restored on VMEXIT, only save ones that aren't
+         * restored.
+         */
+        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
+                if (host_save_user_msrs[i].sev_es_restored)
+                        continue;
+
+                rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
+        }
+
+        /* XCR0 is restored on VMEXIT, save the current host value */
+        hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
+        hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+
+        /* PKRU is restored on VMEXIT, save the current host value */
+        hostsa->pkru = read_pkru();
+
+        /* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
+        hostsa->xss = host_xss;
+}
+
+void sev_es_vcpu_put(struct vcpu_svm *svm)
+{
+        unsigned int i;
+
+        /*
+         * Certain MSRs are restored on VMEXIT and were saved with vmsave in
+         * sev_es_vcpu_load() above. Only restore ones that weren't.
+         */
+        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
+                if (host_save_user_msrs[i].sev_es_restored)
+                        continue;
+
+                wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
+        }
+}
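
One detail in sev_es_vcpu_load() above worth spelling out: the "+ 0x400" skips
the VMCB control area, which occupies the first 0x400 bytes of the page, so
hostsa points at the state save area that VMSAVE fills in. A hypothetical
helper, not part of the patch, that makes the offset explicit:

#include <stdint.h>

/* Hypothetical wrapper: per the APM, a VMCB is a 0x400-byte control area
 * followed by the state save area, so the host save area used by
 * sev_es_vcpu_load() sits at page + 0x400. */
static inline void *vmcb_state_save_area(void *vmcb_page)
{
        return (uint8_t *)vmcb_page + 0x400;
}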
@@ -1417,15 +1417,20 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 vmcb_mark_all_dirty(svm->vmcb);
         }

+        if (sev_es_guest(svm->vcpu.kvm)) {
+                sev_es_vcpu_load(svm, cpu);
+        } else {
 #ifdef CONFIG_X86_64
-        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
+                rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
 #endif
-        savesegment(fs, svm->host.fs);
-        savesegment(gs, svm->host.gs);
-        svm->host.ldt = kvm_read_ldt();
-
-        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+                savesegment(fs, svm->host.fs);
+                savesegment(gs, svm->host.gs);
+                svm->host.ldt = kvm_read_ldt();
+
+                for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+                        rdmsrl(host_save_user_msrs[i].index,
+                               svm->host_user_msrs[i]);
+        }

         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
@@ -1453,19 +1458,24 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
         avic_vcpu_put(vcpu);

         ++vcpu->stat.host_state_reload;
-        kvm_load_ldt(svm->host.ldt);
+        if (sev_es_guest(svm->vcpu.kvm)) {
+                sev_es_vcpu_put(svm);
+        } else {
+                kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
-        loadsegment(fs, svm->host.fs);
-        wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
-        load_gs_index(svm->host.gs);
+                loadsegment(fs, svm->host.fs);
+                wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
+                load_gs_index(svm->host.gs);
 #else
 #ifdef CONFIG_X86_32_LAZY_GS
-        loadsegment(gs, svm->host.gs);
+                loadsegment(gs, svm->host.gs);
 #endif
 #endif
-        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+
+                for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+                        wrmsrl(host_save_user_msrs[i].index,
+                               svm->host_user_msrs[i]);
+        }
 }

 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
...
@@ -23,15 +23,23 @@

 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

-static const u32 host_save_user_msrs[] = {
+static const struct svm_host_save_msrs {
+        u32 index;              /* Index of the MSR */
+        bool sev_es_restored;   /* True if MSR is restored on SEV-ES VMEXIT */
+} host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
-        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
-        MSR_FS_BASE,
+        { .index = MSR_STAR,                    .sev_es_restored = true },
+        { .index = MSR_LSTAR,                   .sev_es_restored = true },
+        { .index = MSR_CSTAR,                   .sev_es_restored = true },
+        { .index = MSR_SYSCALL_MASK,            .sev_es_restored = true },
+        { .index = MSR_KERNEL_GS_BASE,          .sev_es_restored = true },
+        { .index = MSR_FS_BASE,                 .sev_es_restored = true },
 #endif
-        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-        MSR_TSC_AUX,
+        { .index = MSR_IA32_SYSENTER_CS,        .sev_es_restored = true },
+        { .index = MSR_IA32_SYSENTER_ESP,       .sev_es_restored = true },
+        { .index = MSR_IA32_SYSENTER_EIP,       .sev_es_restored = true },
+        { .index = MSR_TSC_AUX,                 .sev_es_restored = false },
 };

 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

 #define MAX_DIRECT_ACCESS_MSRS 18
@@ -580,5 +588,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm);
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
 void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
+void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
+void sev_es_vcpu_put(struct vcpu_svm *svm);

 #endif
@@ -197,7 +197,8 @@ EXPORT_SYMBOL_GPL(host_efer);
 bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

-static u64 __read_mostly host_xss;
+u64 __read_mostly host_xss;
+EXPORT_SYMBOL_GPL(host_xss);

 u64 __read_mostly supported_xss;
 EXPORT_SYMBOL_GPL(supported_xss);
...
@@ -279,6 +279,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

 extern u64 host_xcr0;
 extern u64 supported_xcr0;
+extern u64 host_xss;
 extern u64 supported_xss;

 static inline bool kvm_mpx_supported(void)
...