Commit 7709aba8 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Morph pending exceptions to pending VM-Exits at queue time

Morph pending exceptions to pending VM-Exits (due to interception) when
the exception is queued instead of waiting until nested events are
checked at VM-Entry.  This fixes a longstanding bug where KVM fails to
handle the case where an exception occurs during delivery of a previous
exception, both KVM (L0) and L1 want to intercept the new exception (e.g.
#PF for shadow paging), and KVM determines that the exception is in the
guest's domain, i.e. queues the new exception for L2.  Deferring the
interception check causes KVM to escalate various combinations of
injected+pending exceptions to a double fault (#DF) without consulting
L1's interception desires, and to inject a spurious #DF into L2.
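
To make the idea concrete, here is a minimal, self-contained C sketch of
the queue-time routing described above; the types, field names, and the
interception callback are simplified stand-ins invented for illustration,
not KVM's actual structures or API.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for KVM's queued-exception bookkeeping. */
struct queued_exception {
	bool pending;
	uint8_t vector;
	uint32_t error_code;
};

struct vcpu {
	bool is_guest_mode;                       /* vCPU is currently running L2 */
	struct queued_exception exception;        /* to be injected into the guest */
	struct queued_exception exception_vmexit; /* to be synthesized as a VM-Exit to L1 */
	/* hypothetical callback: does L1 intercept this vector/error code? */
	bool (*is_exception_vmexit)(struct vcpu *v, uint8_t vector, uint32_t error_code);
};

/*
 * Queue-time morphing: decide *now* whether the exception stays with L2 or
 * must become a nested VM-Exit to L1, instead of deferring the interception
 * check until nested events are processed at VM-Entry.
 */
static void queue_exception(struct vcpu *v, uint8_t vector, uint32_t error_code)
{
	struct queued_exception *ex;

	if (v->is_guest_mode && v->is_exception_vmexit(v, vector, error_code))
		ex = &v->exception_vmexit;	/* L1 intercepts: pend a VM-Exit */
	else
		ex = &v->exception;		/* deliver to the current guest */

	ex->pending = true;
	ex->vector = vector;
	ex->error_code = error_code;
}

The point is that the interception decision is taken once, at queue time,
so later event processing never has to guess whether a pending exception
belongs to L2 or to L1.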

KVM has fudged around the issue for #PF by special casing emulated #PF
injection for shadow paging, but the underlying issue is not unique to
shadow paging in L0, e.g. if KVM is intercepting #PF because the guest
has a smaller maxphyaddr and L1 (but not L0) is using shadow paging.
Other exceptions are affected as well, e.g. if KVM is intercepting #GP
for one of SVM's workarounds or for the VMware backdoor emulation stuff.
The other cases have gone unnoticed because the #DF is spurious if and
only if L1 resolves the exception, e.g. KVM's goofs go unnoticed if L1
would have injected #DF anyway.

The hack-a-fix has also led to ugly code, e.g. bailing from the emulator
if #PF injection forced a nested VM-Exit and the emulator finds itself
back in L1.  Allowing for direct-to-VM-Exit queueing also neatly solves
the async #PF in L2 mess; no need to set a magic flag and token, simply
queue a #PF nested VM-Exit.
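
Under the same caveat (invented types rather than the kernel's), the async
#PF simplification boils down to queueing a #PF VM-Exit whose payload
carries the token, instead of stashing a separate nested_apf flag and
token:

#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR 14

/* Simplified stand-in for a queued exception-VM-Exit, payload included. */
struct queued_vmexit {
	bool pending;
	uint8_t vector;
	bool has_payload;
	uint64_t payload;	/* becomes the exit qualification / exit_info_2 */
};

/* Async #PF "page not present" destined for L1: just queue a #PF VM-Exit. */
static void queue_async_pf_vmexit(struct queued_vmexit *ve, uint64_t token)
{
	ve->pending = true;
	ve->vector = PF_VECTOR;
	ve->has_payload = true;
	ve->payload = token;	/* no nested_apf flag or nested_apf_token needed */
}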

Deal with event migration by flagging that a pending exception was queued
by userspace and checking for interception at the next KVM_RUN, e.g. so that
KVM does the right thing regardless of the order in which userspace
restores nested state vs. event state.
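
A rough sketch of that ordering-independence argument, again with
illustrative types and function names rather than KVM's real ones: the
restore path only sets a flag, and the interception decision is redone
once both nested state and event state are in place.

#include <stdbool.h>
#include <stdint.h>

struct pending_exc {
	bool pending;
	uint8_t vector;
	uint32_t error_code;
};

struct vcpu_state {
	bool exception_from_userspace;       /* set by the "set events" path */
	bool is_guest_mode;
	struct pending_exc exception;        /* destined for the guest */
	struct pending_exc exception_vmexit; /* destined for L1 as a VM-Exit */
};

/* Hypothetical interception predicate; in KVM this would ask the nested
 * (SVM/VMX) code, here it is just a placeholder. */
static bool l1_wants_exception(struct vcpu_state *v, uint8_t vec, uint32_t ec)
{
	(void)v; (void)ec;
	return vec == 14;	/* pretend L1 intercepts #PF only */
}

/* Called from the "set events" path: don't decide yet, just remember. */
static void set_events_from_userspace(struct vcpu_state *v, uint8_t vec, uint32_t ec)
{
	v->exception.pending = true;
	v->exception.vector = vec;
	v->exception.error_code = ec;
	v->exception_from_userspace = true;
}

/* Called at the start of the run loop, after nested state is known valid. */
static void recheck_userspace_exception(struct vcpu_state *v)
{
	if (!v->exception_from_userspace)
		return;

	v->exception_from_userspace = false;
	if (v->exception.pending && v->is_guest_mode &&
	    l1_wants_exception(v, v->exception.vector, v->exception.error_code)) {
		/* Morph the restored exception into a pending VM-Exit to L1. */
		v->exception_vmexit = v->exception;
		v->exception.pending = false;
	}
}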

When "getting" events from userspace, simply drop any pending excpetion
that is destined to be intercepted if there is also an injected exception
to be migrated.  Ideally, KVM would migrate both events, but that would
require new ABI, and practically speaking losing the event is unlikely to
be noticed, let alone fatal.  The injected exception is captured, RIP
still points at the original faulting instruction, etc...  So either the
injection on the target will trigger the same intercepted exception, or
the source of the intercepted exception was transient and/or
non-deterministic, thus dropping it is ok-ish.
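
A small sketch of the selection logic when saving events for migration,
with invented types rather than the real KVM_GET_VCPU_EVENTS code: if both
an injected exception and a pending, to-be-intercepted exception exist,
only the injected one is reported.

#include <stdbool.h>
#include <stdint.h>

struct exc {
	bool injected;
	bool pending;
	uint8_t vector;
};

struct migrated_event {
	bool valid;
	bool was_injected;	/* true: re-inject on target; false: re-queue as pending */
	uint8_t vector;
};

/*
 * Only one exception slot exists in the (hypothetical) migration record, so
 * when an injected exception and a pending exception-VM-Exit are both live,
 * the injected one wins and the pending VM-Exit is dropped.
 */
static struct migrated_event get_exception_event(const struct exc *injected,
						 const struct exc *pending_vmexit)
{
	struct migrated_event ev = { 0 };

	if (injected->injected) {
		ev.valid = true;
		ev.was_injected = true;
		ev.vector = injected->vector;	/* pending_vmexit, if any, is lost */
	} else if (pending_vmexit->pending) {
		ev.valid = true;
		ev.was_injected = false;
		ev.vector = pending_vmexit->vector;
	}

	return ev;
}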

Fixes: a04aead1 ("KVM: nSVM: fix running nested guests when npt=0")
Fixes: feaf0c7d ("KVM: nVMX: Do not generate #DF if #PF happens during exception delivery into L2")
Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20220830231614.3580124-22-seanjc@google.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f43f8a3b
@@ -649,7 +649,6 @@ struct kvm_queued_exception {
 	u32 error_code;
 	unsigned long payload;
 	bool has_payload;
-	u8 nested_apf;
 };
 
 struct kvm_vcpu_arch {
@@ -750,8 +749,12 @@ struct kvm_vcpu_arch {
 
 	u8 event_exit_inst_len;
 
+	bool exception_from_userspace;
+
 	/* Exceptions to be injected to the guest. */
 	struct kvm_queued_exception exception;
+	/* Exception VM-Exits to be synthesized to L1. */
+	struct kvm_queued_exception exception_vmexit;
 
 	struct kvm_queued_interrupt {
 		bool injected;
@@ -862,7 +865,6 @@ struct kvm_vcpu_arch {
 		u32 id;
 		bool send_user_only;
 		u32 host_apf_flags;
-		unsigned long nested_apf_token;
 		bool delivery_as_pf_vmexit;
 		bool pageready_pending;
 	} apf;
@@ -1638,9 +1640,9 @@ struct kvm_x86_ops {
 
 struct kvm_x86_nested_ops {
 	void (*leave_nested)(struct kvm_vcpu *vcpu);
+	bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
+				    u32 error_code);
 	int (*check_events)(struct kvm_vcpu *vcpu);
-	bool (*handle_page_fault_workaround)(struct kvm_vcpu *vcpu,
-					     struct x86_exception *fault);
 	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
 	void (*triple_fault)(struct kvm_vcpu *vcpu);
 	int (*get_state)(struct kvm_vcpu *vcpu,
@@ -1867,7 +1869,7 @@ void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long pay
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
-bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
-				    struct x86_exception *fault);
+void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
+				    struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
...
@@ -55,28 +55,6 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 	nested_svm_vmexit(svm);
 }
 
-static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
-						    struct x86_exception *fault)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb *vmcb = svm->vmcb;
-
-	WARN_ON(!is_guest_mode(vcpu));
-
-	if (vmcb12_is_intercept(&svm->nested.ctl,
-				INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
-	    !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
-		vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
-		vmcb->control.exit_code_hi = 0;
-		vmcb->control.exit_info_1 = fault->error_code;
-		vmcb->control.exit_info_2 = fault->address;
-		nested_svm_vmexit(svm);
-		return true;
-	}
-
-	return false;
-}
-
 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1308,16 +1286,17 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static bool nested_exit_on_exception(struct vcpu_svm *svm)
+static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
+					   u32 error_code)
 {
-	unsigned int vector = svm->vcpu.arch.exception.vector;
+	struct vcpu_svm *svm = to_svm(vcpu);
 
 	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
 }
 
 static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
 {
-	struct kvm_queued_exception *ex = &vcpu->arch.exception;
+	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 
@@ -1332,9 +1311,7 @@ static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
 	 * than #PF.
 	 */
 	if (ex->vector == PF_VECTOR) {
-		if (ex->nested_apf)
-			vmcb->control.exit_info_2 = vcpu->arch.apf.nested_apf_token;
-		else if (ex->has_payload)
+		if (ex->has_payload)
 			vmcb->control.exit_info_2 = ex->payload;
 		else
 			vmcb->control.exit_info_2 = vcpu->arch.cr2;
@@ -1387,15 +1364,19 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	if (vcpu->arch.exception.pending) {
+	if (vcpu->arch.exception_vmexit.pending) {
 		if (block_nested_exceptions)
 			return -EBUSY;
-		if (!nested_exit_on_exception(svm))
-			return 0;
 		nested_svm_inject_exception_vmexit(vcpu);
 		return 0;
 	}
 
+	if (vcpu->arch.exception.pending) {
+		if (block_nested_exceptions)
+			return -EBUSY;
+		return 0;
+	}
+
 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
 		if (block_nested_events)
 			return -EBUSY;
@@ -1733,8 +1714,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 
 struct kvm_x86_nested_ops svm_nested_ops = {
 	.leave_nested = svm_leave_nested,
+	.is_exception_vmexit = nested_svm_is_exception_vmexit,
 	.check_events = svm_check_nested_events,
-	.handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
 	.triple_fault = nested_svm_triple_fault,
 	.get_nested_state_pages = svm_get_nested_state_pages,
 	.get_state = svm_get_nested_state,
...
@@ -439,59 +439,22 @@ static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
 	return inequality ^ bit;
 }
 
-/*
- * KVM wants to inject page-faults which it got to the guest. This function
- * checks whether in a nested guest, we need to inject them to L1 or L2.
- */
-static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
+static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
+					   u32 error_code)
 {
-	struct kvm_queued_exception *ex = &vcpu->arch.exception;
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
-	if (ex->vector == PF_VECTOR) {
-		if (ex->nested_apf) {
-			*exit_qual = vcpu->arch.apf.nested_apf_token;
-			return 1;
-		}
-		if (nested_vmx_is_page_fault_vmexit(vmcs12, ex->error_code)) {
-			*exit_qual = ex->has_payload ? ex->payload : vcpu->arch.cr2;
-			return 1;
-		}
-	} else if (vmcs12->exception_bitmap & (1u << ex->vector)) {
-		if (ex->vector == DB_VECTOR) {
-			if (ex->has_payload) {
-				*exit_qual = ex->payload;
-			} else {
-				*exit_qual = vcpu->arch.dr6;
-				*exit_qual &= ~DR6_BT;
-				*exit_qual ^= DR6_ACTIVE_LOW;
-			}
-		} else
-			*exit_qual = 0;
-		return 1;
-	}
-
-	return 0;
-}
-
-static bool nested_vmx_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
-						    struct x86_exception *fault)
-{
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
-	WARN_ON(!is_guest_mode(vcpu));
-
-	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
-	    !WARN_ON_ONCE(to_vmx(vcpu)->nested.nested_run_pending)) {
-		vmcs12->vm_exit_intr_error_code = fault->error_code;
-		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
-				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
-				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
-				  fault->address);
-		return true;
-	}
-
-	return false;
+	/*
+	 * Drop bits 31:16 of the error code when performing the #PF mask+match
+	 * check.  All VMCS fields involved are 32 bits, but Intel CPUs never
+	 * set bits 31:16 and VMX disallows setting bits 31:16 in the injected
+	 * error code.  Including the to-be-dropped bits in the check might
+	 * result in an "impossible" or missed exit from L1's perspective.
+	 */
+	if (vector == PF_VECTOR)
+		return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);
+
+	return (vmcs12->exception_bitmap & (1u << vector));
 }
 
 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
@@ -3863,12 +3826,24 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
 	return -ENXIO;
 }
 
-static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
-					       unsigned long exit_qual)
+static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
 {
-	struct kvm_queued_exception *ex = &vcpu->arch.exception;
+	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
 	u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	unsigned long exit_qual;
+
+	if (ex->has_payload) {
+		exit_qual = ex->payload;
+	} else if (ex->vector == PF_VECTOR) {
+		exit_qual = vcpu->arch.cr2;
+	} else if (ex->vector == DB_VECTOR) {
+		exit_qual = vcpu->arch.dr6;
+		exit_qual &= ~DR6_BT;
+		exit_qual ^= DR6_ACTIVE_LOW;
+	} else {
+		exit_qual = 0;
+	}
 
 	if (ex->has_error_code) {
 		/*
@@ -4041,7 +4016,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long exit_qual;
 	/*
 	 * Only a pending nested run blocks a pending exception.  If there is a
 	 * previously injected event, the pending exception occurred while said
@@ -4095,14 +4069,20 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 	 * across SMI/RSM as it should; that needs to be addressed in order to
 	 * prioritize SMI over MTF and trap-like #DBs.
 	 */
+	if (vcpu->arch.exception_vmexit.pending &&
+	    !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) {
+		if (block_nested_exceptions)
+			return -EBUSY;
+
+		nested_vmx_inject_exception_vmexit(vcpu);
+		return 0;
+	}
+
 	if (vcpu->arch.exception.pending &&
 	    !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) {
 		if (block_nested_exceptions)
 			return -EBUSY;
-		if (!nested_vmx_check_exception(vcpu, &exit_qual))
-			goto no_vmexit;
-		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
-		return 0;
+		goto no_vmexit;
 	}
 
 	if (vmx->nested.mtf_pending) {
@@ -4113,13 +4093,18 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
+	if (vcpu->arch.exception_vmexit.pending) {
+		if (block_nested_exceptions)
+			return -EBUSY;
+
+		nested_vmx_inject_exception_vmexit(vcpu);
+		return 0;
+	}
+
 	if (vcpu->arch.exception.pending) {
 		if (block_nested_exceptions)
 			return -EBUSY;
-		if (!nested_vmx_check_exception(vcpu, &exit_qual))
-			goto no_vmexit;
-		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
-		return 0;
+		goto no_vmexit;
 	}
 
 	if (nested_vmx_preemption_timer_pending(vcpu)) {
@@ -6984,8 +6969,8 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 
 struct kvm_x86_nested_ops vmx_nested_ops = {
 	.leave_nested = vmx_leave_nested,
+	.is_exception_vmexit = nested_vmx_is_exception_vmexit,
 	.check_events = vmx_check_nested_events,
-	.handle_page_fault_workaround = nested_vmx_handle_page_fault_workaround,
 	.hv_timer_pending = nested_vmx_preemption_timer_pending,
 	.triple_fault = nested_vmx_triple_fault,
 	.get_state = vmx_get_nested_state,
...
@@ -1659,7 +1659,9 @@ static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
 	 */
 	if (nested_cpu_has_mtf(vmcs12) &&
 	    (!vcpu->arch.exception.pending ||
-	     vcpu->arch.exception.vector == DB_VECTOR))
+	     vcpu->arch.exception.vector == DB_VECTOR) &&
+	    (!vcpu->arch.exception_vmexit.pending ||
+	     vcpu->arch.exception_vmexit.vector == DB_VECTOR))
 		vmx->nested.mtf_pending = true;
 	else
 		vmx->nested.mtf_pending = false;
@@ -5692,7 +5694,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	return vmx->emulation_required && !vmx->rmode.vm86_active &&
-	       (vcpu->arch.exception.pending || vcpu->arch.exception.injected);
+	       (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
 }
 
 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
...
(The diff for one additional file is collapsed and not shown.)
@@ -82,10 +82,17 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
 
 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
 int kvm_check_nested_events(struct kvm_vcpu *vcpu);
 
+static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.exception.pending ||
+	       vcpu->arch.exception_vmexit.pending;
+}
+
 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.exception.pending = false;
 	vcpu->arch.exception.injected = false;
+	vcpu->arch.exception_vmexit.pending = false;
 }
 
 static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
...