Commit 7e5b5ef8 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: SVM: Re-inject INTn instead of retrying the insn on "failure"

Re-inject INTn software interrupts instead of retrying the instruction if
the CPU encountered an intercepted exception while vectoring the INTn,
e.g. if KVM intercepted a #PF when utilizing shadow paging.  Retrying the
instruction is architecturally wrong e.g. will result in a spurious #DB
if there's a code breakpoint on the INT3/INTO, and lack of re-injection also
breaks nested virtualization, e.g. if L1 injects a software interrupt and
vectoring the injected interrupt encounters an exception that is
intercepted by L0 but not L1.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <1654ad502f860948e4f2d57b8bd881d67301f785.1651440202.git.maciej.szmigiero@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6ef88d6e
...@@ -617,10 +617,9 @@ static inline bool is_evtinj_soft(u32 evtinj) ...@@ -617,10 +617,9 @@ static inline bool is_evtinj_soft(u32 evtinj)
if (!(evtinj & SVM_EVTINJ_VALID)) if (!(evtinj & SVM_EVTINJ_VALID))
return false; return false;
/* if (type == SVM_EVTINJ_TYPE_SOFT)
* Intentionally return false for SOFT events, SVM doesn't yet support return true;
* re-injecting soft interrupts.
*/
return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector); return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
} }
......
...@@ -3438,12 +3438,22 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) ...@@ -3438,12 +3438,22 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
static void svm_inject_irq(struct kvm_vcpu *vcpu) static void svm_inject_irq(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
u32 type;
if (vcpu->arch.interrupt.soft) {
if (svm_update_soft_interrupt_rip(vcpu))
return;
type = SVM_EVTINJ_TYPE_SOFT;
} else {
type = SVM_EVTINJ_TYPE_INTR;
}
trace_kvm_inj_virq(vcpu->arch.interrupt.nr); trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
++vcpu->stat.irq_injections; ++vcpu->stat.irq_injections;
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; SVM_EVTINJ_VALID | type;
} }
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
...@@ -3723,6 +3733,8 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) ...@@ -3723,6 +3733,8 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
int type) int type)
{ {
bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
/* /*
...@@ -3734,8 +3746,7 @@ static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, ...@@ -3734,8 +3746,7 @@ static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
* the same event, i.e. if the event is a soft exception/interrupt, * the same event, i.e. if the event is a soft exception/interrupt,
* otherwise next_rip is unused on VMRUN. * otherwise next_rip is unused on VMRUN.
*/ */
if (nrips && type == SVM_EXITINTINFO_TYPE_EXEPT && if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
kvm_exception_is_soft(vector) &&
kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
svm->vmcb->control.next_rip = svm->soft_int_next_rip; svm->vmcb->control.next_rip = svm->soft_int_next_rip;
/* /*
...@@ -3746,7 +3757,7 @@ static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, ...@@ -3746,7 +3757,7 @@ static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
* hit a #NP in the guest, and the #NP encountered a #PF, the #NP will * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
* be the reported vectored event, but RIP still needs to be unwound. * be the reported vectored event, but RIP still needs to be unwound.
*/ */
else if (!nrips && type == SVM_EXITINTINFO_TYPE_EXEPT && else if (!nrips && (is_soft || is_exception) &&
kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
kvm_rip_write(vcpu, svm->soft_int_old_rip); kvm_rip_write(vcpu, svm->soft_int_old_rip);
} }
...@@ -3808,9 +3819,13 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu) ...@@ -3808,9 +3819,13 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
case SVM_EXITINTINFO_TYPE_INTR: case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(vcpu, vector, false); kvm_queue_interrupt(vcpu, vector, false);
break; break;
case SVM_EXITINTINFO_TYPE_SOFT:
kvm_queue_interrupt(vcpu, vector, true);
break;
default: default:
break; break;
} }
} }
static void svm_cancel_injection(struct kvm_vcpu *vcpu) static void svm_cancel_injection(struct kvm_vcpu *vcpu)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment