Commit 7d82714d authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Book3s: PR: Disable preemption in vcpu_run

When entering the guest, we want to make sure we're not getting preempted
away, so let's disable preemption on entry, but enable it again while handling
guest exits.
Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent dfd4d47e
@@ -517,6 +517,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->ready_for_interrupt_injection = 1;

 	trace_kvm_book3s_exit(exit_nr, vcpu);
+	preempt_enable();
 	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -761,6 +762,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		run->exit_reason = KVM_EXIT_INTR;
 		r = -EINTR;
 	} else {
+		preempt_disable();
+
 		/* In case an interrupt came in that was triggered
 		 * from userspace (like DEC), we need to check what
 		 * to inject now! */
@@ -923,10 +926,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 	ulong ext_msr;

+	preempt_disable();
+
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}

 	kvmppc_core_prepare_to_enter(vcpu);
@@ -934,7 +940,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* No need to go into the guest when all we do is going out */
 	if (signal_pending(current)) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
-		return -EINTR;
+		ret = -EINTR;
+		goto out;
 	}

 	/* Save FPU state in stack */
/* Save FPU state in stack */ /* Save FPU state in stack */
...@@ -1004,6 +1011,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1004,6 +1011,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.used_vsr = used_vsr; current->thread.used_vsr = used_vsr;
#endif #endif
out:
preempt_enable();
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment