Commit 647daca2 authored by Tom Lendacky, committed by Paolo Bonzini

KVM: SVM: Add support for booting APs in an SEV-ES guest

Typically under KVM, an AP is booted using the INIT-SIPI-SIPI sequence, where
the guest vCPU register state is updated and the vCPU is then run via VMRUN to
begin execution of the AP. For an SEV-ES guest, this won't work because the
guest register state is encrypted.

Following the GHCB specification, the hypervisor must not alter the guest
register state, so KVM must track an AP/vCPU boot. Should the guest want
to park the AP, it must use the AP Reset Hold exit event in place of, for
example, a HLT loop.

First AP boot (first INIT-SIPI-SIPI sequence):
  Execute the AP (vCPU) as it was initialized and measured by the SEV-ES
  support. It is up to the guest to transfer control of the AP to the
  proper location.

Subsequent AP boot:
  KVM will expect to receive an AP Reset Hold exit event indicating that
  the vCPU is being parked and will require an INIT-SIPI-SIPI sequence to
  awaken it. When the AP Reset Hold exit event is received, KVM will place
  the vCPU into a simulated HLT mode. Upon receiving the INIT-SIPI-SIPI
  sequence, KVM will make the vCPU runnable. It is again up to the guest
  to then transfer control of the AP to the proper location.

  To differentiate between an actual HLT and an AP Reset Hold, a new MP
  state is introduced, KVM_MP_STATE_AP_RESET_HOLD, which the vCPU is
  placed in upon receiving the AP Reset Hold exit event. Additionally, to
  communicate the AP Reset Hold exit event up to userspace (if needed), a
  new exit reason is introduced, KVM_EXIT_AP_RESET_HOLD.
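
  For illustration, a minimal sketch of how a userspace VMM's vCPU run loop
  might react to the new exit reason when the local APIC is not emulated in
  the kernel. This is only an assumption about VMM usage, not part of this
  patch; the vcpu_fd and run names are hypothetical and the usual
  KVM_CREATE_VCPU/mmap setup is assumed to exist elsewhere:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
    {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                    return -1;

            switch (run->exit_reason) {
            case KVM_EXIT_HLT:
                    /* Ordinary halt: wait for an unhalt event. */
                    break;
            case KVM_EXIT_AP_RESET_HOLD:
                    /* AP parked via AP Reset Hold: keep the vCPU off the
                     * run queue until an INIT-SIPI-SIPI sequence arrives.
                     */
                    break;
            default:
                    break;
            }
            return 0;
    }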

A new x86 ops function is introduced, vcpu_deliver_sipi_vector, in order
to accomplish AP booting. For VMX, vcpu_deliver_sipi_vector is set to the
original SIPI delivery function, kvm_vcpu_deliver_sipi_vector(). SVM adds
a new function that, for non-SEV-ES guests, invokes the original SIPI
delivery function, kvm_vcpu_deliver_sipi_vector(), but for SEV-ES guests,
implements the logic above.
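
With the in-kernel local APIC, the AP Reset Hold does not surface to userspace
as an exit reason; instead, the parked vCPU can be observed through
KVM_GET_MP_STATE as the new MP state. A hedged sketch of such a check follows
(again only an assumed usage, with a hypothetical vcpu_fd):

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  static int ap_is_in_reset_hold(int vcpu_fd)
  {
          struct kvm_mp_state mp = { 0 };

          if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
                  return -1;

          /* Non-zero if the AP is parked in the simulated HLT state. */
          return mp.mp_state == KVM_MP_STATE_AP_RESET_HOLD;
  }
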
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <e8fbebe8eb161ceaabdad7c01a5859a78b424d5e.1609791600.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f2c7ef3b
@@ -1299,6 +1299,8 @@ struct kvm_x86_ops {
         void (*migrate_timers)(struct kvm_vcpu *vcpu);
         void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
         int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
+
+        void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
 };
 
 struct kvm_x86_nested_ops {
@@ -1480,6 +1482,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
...
@@ -2898,7 +2898,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                 /* evaluate pending_events before reading the vector */
                 smp_rmb();
                 sipi_vector = apic->sipi_vector;
-                kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+                kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
         }
 }
...
@@ -1563,6 +1563,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
                         goto vmgexit_err;
                 break;
         case SVM_VMGEXIT_NMI_COMPLETE:
+        case SVM_VMGEXIT_AP_HLT_LOOP:
         case SVM_VMGEXIT_AP_JUMP_TABLE:
         case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                 break;
@@ -1888,6 +1889,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
         case SVM_VMGEXIT_NMI_COMPLETE:
                 ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
                 break;
+        case SVM_VMGEXIT_AP_HLT_LOOP:
+                ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+                break;
         case SVM_VMGEXIT_AP_JUMP_TABLE: {
                 struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
@@ -2040,3 +2044,21 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
                         wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
         }
 }
+
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        /* First SIPI: Use the values as initially set by the VMM */
+        if (!svm->received_first_sipi) {
+                svm->received_first_sipi = true;
+                return;
+        }
+
+        /*
+         * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
+         * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+         * non-zero value.
+         */
+        ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+}
@@ -4382,6 +4382,14 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
                (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
 }
 
+static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+        if (!sev_es_guest(vcpu->kvm))
+                return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
+
+        sev_vcpu_deliver_sipi_vector(vcpu, vector);
+}
+
 static void svm_vm_destroy(struct kvm *kvm)
 {
         avic_vm_destroy(kvm);
@@ -4524,6 +4532,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
         .msr_filter_changed = svm_msr_filter_changed,
         .complete_emulated_msr = svm_complete_emulated_msr,
+
+        .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
...
@@ -185,6 +185,7 @@ struct vcpu_svm {
         struct vmcb_save_area *vmsa;
         struct ghcb *ghcb;
         struct kvm_host_map ghcb_map;
+        bool received_first_sipi;
 
         /* SEV-ES scratch area support */
         void *ghcb_sa;
@@ -591,6 +592,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
 void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
 void sev_es_vcpu_put(struct vcpu_svm *svm);
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 
 /* vmenter.S */
...
@@ -7707,6 +7707,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
         .msr_filter_changed = vmx_msr_filter_changed,
         .complete_emulated_msr = kvm_complete_insn_gp,
         .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
+
+        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };
 
 static __init int hardware_setup(void)
...
@@ -7976,17 +7976,22 @@ void kvm_arch_exit(void)
         kmem_cache_destroy(x86_fpu_cache);
 }
 
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
         ++vcpu->stat.halt_exits;
         if (lapic_in_kernel(vcpu)) {
-                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+                vcpu->arch.mp_state = state;
                 return 1;
         } else {
-                vcpu->run->exit_reason = KVM_EXIT_HLT;
+                vcpu->run->exit_reason = reason;
                 return 0;
         }
 }
+
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+{
+        return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
@@ -8000,6 +8005,14 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+        int ret = kvm_skip_emulated_instruction(vcpu);
+
+        return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
                                 unsigned long clock_type)
@@ -9096,6 +9109,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
         kvm_apic_accept_events(vcpu);
         switch(vcpu->arch.mp_state) {
         case KVM_MP_STATE_HALTED:
+        case KVM_MP_STATE_AP_RESET_HOLD:
                 vcpu->arch.pv.pv_unhalted = false;
                 vcpu->arch.mp_state =
                         KVM_MP_STATE_RUNNABLE;
@@ -9522,7 +9536,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
         kvm_load_guest_fpu(vcpu);
         kvm_apic_accept_events(vcpu);
-        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
+        if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
+             vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
             vcpu->arch.pv.pv_unhalted)
                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
         else
@@ -10154,6 +10169,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
         kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
         kvm_rip_write(vcpu, 0);
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
 
 int kvm_arch_hardware_enable(void)
 {
...
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
 #define KVM_EXIT_X86_RDMSR        29
 #define KVM_EXIT_X86_WRMSR        30
 #define KVM_EXIT_DIRTY_RING_FULL  31
+#define KVM_EXIT_AP_RESET_HOLD    32
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_CHECK_STOP        6
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
+#define KVM_MP_STATE_AP_RESET_HOLD     9
 
 struct kvm_mp_state {
         __u32 mp_state;
...