Commit 4fe09bcf authored by Jim Mattson, committed by Paolo Bonzini

KVM: x86: Add a return code to kvm_apic_accept_events

No functional change intended. At present, the only negative value
returned by kvm_check_nested_events is -EBUSY.
Signed-off-by: Jim Mattson <jmattson@google.com>
Message-Id: <20210604172611.281819-6-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a5f6909a
...@@ -2872,7 +2872,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len) ...@@ -2872,7 +2872,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len); return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
} }
void kvm_apic_accept_events(struct kvm_vcpu *vcpu) int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
u8 sipi_vector; u8 sipi_vector;
...@@ -2880,7 +2880,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2880,7 +2880,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
unsigned long pe; unsigned long pe;
if (!lapic_in_kernel(vcpu)) if (!lapic_in_kernel(vcpu))
return; return 0;
/* /*
* Read pending events before calling the check_events * Read pending events before calling the check_events
...@@ -2888,12 +2888,12 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2888,12 +2888,12 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
*/ */
pe = smp_load_acquire(&apic->pending_events); pe = smp_load_acquire(&apic->pending_events);
if (!pe) if (!pe)
return; return 0;
if (is_guest_mode(vcpu)) { if (is_guest_mode(vcpu)) {
r = kvm_check_nested_events(vcpu); r = kvm_check_nested_events(vcpu);
if (r < 0) if (r < 0)
return; return r == -EBUSY ? 0 : r;
/* /*
* If an event has happened and caused a vmexit, * If an event has happened and caused a vmexit,
* we know INITs are latched and therefore * we know INITs are latched and therefore
...@@ -2914,7 +2914,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2914,7 +2914,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
if (test_bit(KVM_APIC_SIPI, &pe)) if (test_bit(KVM_APIC_SIPI, &pe))
clear_bit(KVM_APIC_SIPI, &apic->pending_events); clear_bit(KVM_APIC_SIPI, &apic->pending_events);
return; return 0;
} }
if (test_bit(KVM_APIC_INIT, &pe)) { if (test_bit(KVM_APIC_INIT, &pe)) {
...@@ -2935,6 +2935,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2935,6 +2935,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
} }
} }
return 0;
} }
void kvm_lapic_exit(void) void kvm_lapic_exit(void)
......
...@@ -76,7 +76,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu); ...@@ -76,7 +76,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu); int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu); int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu); int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
void kvm_apic_accept_events(struct kvm_vcpu *vcpu); int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event); void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu); u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8); void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
......
...@@ -9335,7 +9335,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -9335,7 +9335,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
kvm_xen_has_interrupt(vcpu)) { kvm_xen_has_interrupt(vcpu)) {
++vcpu->stat.req_event; ++vcpu->stat.req_event;
kvm_apic_accept_events(vcpu); r = kvm_apic_accept_events(vcpu);
if (r < 0) {
r = 0;
goto out;
}
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
r = 1; r = 1;
goto out; goto out;
...@@ -9547,7 +9551,8 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) ...@@ -9547,7 +9551,8 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
return 1; return 1;
} }
kvm_apic_accept_events(vcpu); if (kvm_apic_accept_events(vcpu) < 0)
return 0;
switch(vcpu->arch.mp_state) { switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED: case KVM_MP_STATE_HALTED:
case KVM_MP_STATE_AP_RESET_HOLD: case KVM_MP_STATE_AP_RESET_HOLD:
...@@ -9771,7 +9776,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -9771,7 +9776,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out; goto out;
} }
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
kvm_apic_accept_events(vcpu); if (kvm_apic_accept_events(vcpu) < 0) {
r = 0;
goto out;
}
kvm_clear_request(KVM_REQ_UNHALT, vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN; r = -EAGAIN;
if (signal_pending(current)) { if (signal_pending(current)) {
...@@ -9973,11 +9981,17 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, ...@@ -9973,11 +9981,17 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
int r;
vcpu_load(vcpu); vcpu_load(vcpu);
if (kvm_mpx_supported()) if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu); kvm_load_guest_fpu(vcpu);
kvm_apic_accept_events(vcpu); r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
r = 0;
if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
vcpu->arch.pv.pv_unhalted) vcpu->arch.pv.pv_unhalted)
...@@ -9985,10 +9999,11 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, ...@@ -9985,10 +9999,11 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
else else
mp_state->mp_state = vcpu->arch.mp_state; mp_state->mp_state = vcpu->arch.mp_state;
out:
if (kvm_mpx_supported()) if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu); kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu); vcpu_put(vcpu);
return 0; return r;
} }
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment