Commit 36e2e983 authored by Paolo Bonzini

KVM: nSVM: synthesize correct EXITINTINFO on vmexit

This bit was added to nested VMX right when nested_run_pending was
introduced, but it is not yet there in nSVM.  Since we can have pending
events that L0 injected directly into L2 on vmentry, we have to transfer
them into L1's queue.

For this to work, one important change is required: svm_complete_interrupts
(which clears the "injected" fields from the previous VMRUN, and updates them
from svm->vmcb's EXITINTINFO) must be placed before we inject the vmexit.
This is not too scary though; VMX even does it in vmx_vcpu_run.

While at it, the nested_vmexit_inject tracepoint is moved towards the
end of nested_svm_vmexit.  This ensures that the synthesized EXITINTINFO
is visible in the trace.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 91b7130c
...@@ -262,6 +262,43 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm) ...@@ -262,6 +262,43 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm)
svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
} }
/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *nested_vmcb)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 info = 0;

	/*
	 * At most one of the three "injected" sources is set at a time;
	 * exceptions take priority over NMIs, which take priority over
	 * external/soft interrupts.
	 */
	if (vcpu->arch.exception.injected) {
		info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT |
		       vcpu->arch.exception.nr;
		if (vcpu->arch.exception.has_error_code) {
			info |= SVM_EVTINJ_VALID_ERR;
			nested_vmcb->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}
	} else if (vcpu->arch.nmi_injected) {
		info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	} else if (vcpu->arch.interrupt.injected) {
		info = SVM_EVTINJ_VALID | vcpu->arch.interrupt.nr |
		       (vcpu->arch.interrupt.soft ? SVM_EVTINJ_TYPE_SOFT
						  : SVM_EVTINJ_TYPE_INTR);
	}

	nested_vmcb->control.exit_int_info = info;
}
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb) static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{ {
/* Load the nested guest state */ /* Load the nested guest state */
...@@ -466,13 +503,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -466,13 +503,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
struct vmcb *vmcb = svm->vmcb; struct vmcb *vmcb = svm->vmcb;
struct kvm_host_map map; struct kvm_host_map map;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
vmcb->control.exit_info_2,
vmcb->control.exit_int_info,
vmcb->control.exit_int_info_err,
KVM_ISA_SVM);
rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map); rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
if (rc) { if (rc) {
if (rc == -EINVAL) if (rc == -EINVAL)
...@@ -517,8 +547,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -517,8 +547,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi; nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1; nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2; nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err; if (nested_vmcb->control.exit_code != SVM_EXIT_ERR)
nested_vmcb_save_pending_event(svm, nested_vmcb);
if (svm->nrips_enabled) if (svm->nrips_enabled)
nested_vmcb->control.next_rip = vmcb->control.next_rip; nested_vmcb->control.next_rip = vmcb->control.next_rip;
...@@ -539,9 +570,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -539,9 +570,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset; svm->vcpu.arch.l1_tsc_offset;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
svm->nested.ctl.nested_cr3 = 0; svm->nested.ctl.nested_cr3 = 0;
/* Restore selected save entries */ /* Restore selected save entries */
...@@ -570,6 +598,13 @@ int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -570,6 +598,13 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
mark_all_dirty(svm->vmcb); mark_all_dirty(svm->vmcb);
trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code,
nested_vmcb->control.exit_info_1,
nested_vmcb->control.exit_info_2,
nested_vmcb->control.exit_int_info,
nested_vmcb->control.exit_int_info_err,
KVM_ISA_SVM);
kvm_vcpu_unmap(&svm->vcpu, &map, true); kvm_vcpu_unmap(&svm->vcpu, &map, true);
nested_svm_uninit_mmu_context(&svm->vcpu); nested_svm_uninit_mmu_context(&svm->vcpu);
......
...@@ -2913,6 +2913,8 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) ...@@ -2913,6 +2913,8 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled) if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3; vcpu->arch.cr3 = svm->vmcb->save.cr3;
svm_complete_interrupts(svm);
if (is_guest_mode(vcpu)) { if (is_guest_mode(vcpu)) {
int vmexit; int vmexit;
...@@ -2932,8 +2934,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) ...@@ -2932,8 +2934,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return 1; return 1;
} }
svm_complete_interrupts(svm);
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason kvm_run->fail_entry.hardware_entry_failure_reason
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment