Commit ef85b673 authored by Jim Mattson, committed by Paolo Bonzini

kvm: nVMX: Allow L1 to intercept software exceptions (#BP and #OF)

When L2 exits to L0 due to "exception or NMI", software exceptions
(#BP and #OF) for which L1 has requested an intercept should be
handled by L1 rather than L0. Previously, only hardware exceptions
were forwarded to L1.
Signed-off-by: Jim Mattson <jmattson@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cc0d907c
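The commit message can be made concrete with a small, self-contained sketch. This is not kernel code: the constants mirror the VM-exit interruption-information encoding used in arch/x86/include/asm/vmx.h, and reflect_to_l1() is a hypothetical stand-in for the EXIT_REASON_EXCEPTION_NMI case of nested_vmx_exit_handled() that omits its page-fault, #NM and debug special cases. It shows that a #BP raised in L2 carries interruption type 6 (software exception), so it never satisfied the old is_exception() test and L0 kept the exit even when L1 had set bit 3 of its exception bitmap; with the NMI-only test it falls through to the bitmap check and is reflected to L1.

/*
 * Standalone illustration, not kernel code. Constant values mirror the
 * VMX interruption-info definitions; reflect_to_l1() is a simplified
 * model of the EXIT_REASON_EXCEPTION_NMI decision.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VECTOR_MASK     0xffu        /* bits 7:0, vector */
#define INTR_INFO_INTR_TYPE_MASK  0x700u       /* bits 10:8, interruption type */
#define INTR_INFO_VALID_MASK      0x80000000u  /* bit 31, info valid */

#define INTR_TYPE_NMI_INTR        (2u << 8)    /* NMI */
#define INTR_TYPE_HARD_EXCEPTION  (3u << 8)    /* hardware exception (#PF, #GP, ...) */
#define INTR_TYPE_SOFT_EXCEPTION  (6u << 8)    /* software exception (#BP, #OF) */

/* Old helper: matched hardware exceptions only. */
static bool is_exception(uint32_t intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

/* New helper: matches NMIs only. */
static bool is_nmi(uint32_t intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}

/*
 * Hypothetical, simplified model of the exception/NMI exit decision:
 * return true if the exit should be reflected to L1. exception_bitmap
 * stands in for vmcs12->exception_bitmap.
 */
static bool reflect_to_l1(uint32_t intr_info, uint32_t exception_bitmap,
                          bool use_old_helper)
{
        if (use_old_helper ? !is_exception(intr_info) : is_nmi(intr_info))
                return false;   /* L0 keeps the exit */
        return exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK));
}

int main(void)
{
        /* #BP delivered in L2: vector 3, software exception, valid bit set. */
        uint32_t bp = 3 | INTR_TYPE_SOFT_EXCEPTION | INTR_INFO_VALID_MASK;
        uint32_t l1_bitmap = 1u << 3;   /* L1 intercepts #BP */

        printf("old helper reflects #BP to L1: %d\n",
               reflect_to_l1(bp, l1_bitmap, true));
        printf("new helper reflects #BP to L1: %d\n",
               reflect_to_l1(bp, l1_bitmap, false));
        return 0;
}

Compiled and run, the sketch prints 0 for the old helper and 1 for the new one, matching the behavior change the commit describes.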
@@ -1389,10 +1389,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
 }
 
-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
 }
 
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
@@ -5728,7 +5728,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 	if (is_machine_check(intr_info))
 		return handle_machine_check(vcpu);
 
-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+	if (is_nmi(intr_info))
 		return 1;  /* already handled by vmx_vcpu_run() */
 
 	if (is_no_device(intr_info)) {
@@ -8170,7 +8170,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 
 	switch (exit_reason) {
 	case EXIT_REASON_EXCEPTION_NMI:
-		if (!is_exception(intr_info))
+		if (is_nmi(intr_info))
 			return false;
 		else if (is_page_fault(intr_info))
 			return enable_ept;
@@ -8765,8 +8765,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 		kvm_machine_check();
 
 	/* We need to handle NMIs before interrupts are enabled */
-	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
+	if (is_nmi(exit_intr_info)) {
 		kvm_before_handle_nmi(&vmx->vcpu);
 		asm("int $2");
 		kvm_after_handle_nmi(&vmx->vcpu);