Commit 07ae5389 authored by Alexander Graf, committed by Paul Mackerras

KVM: PPC: Book3S PR: Fix svcpu copying with preemption enabled

When copying between the vcpu and svcpu, we may get scheduled away onto
a different host CPU, which in turn means our svcpu pointer may change.

That means we need to copy to and from the svcpu atomically, with preemption
disabled, so that all code around it always sees a coherent state.
Reported-by: Simon Guo <wei.guo.simon@gmail.com>
Fixes: 3d3319b4 ("KVM: PPC: Book3S: PR: Enable interrupts earlier")
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 36ee41d1
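
Background for the diff below: on 64-bit Book3S the shadow vcpu lives in the
per-CPU paca, so a pointer fetched while preemptible can be stale by the time
the copy runs. The fix routes all copies through svcpu_get()/svcpu_put(),
which bracket the copy in a preemption-disabled section. As a rough sketch
(the helper bodies here are paraphrased from this kernel's
arch/powerpc/include/asm/kvm_book3s_64.h and are not part of this diff):

	static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
	{
		preempt_disable();			/* pin the task to the current CPU */
		return &get_paca()->shadow_vcpu;	/* per-CPU pointer, stable until put */
	}

	static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
	{
		preempt_enable();			/* pointer must not be used past here */
	}

Because the pointer is obtained and used inside a single preempt-off window,
the copy is atomic with respect to scheduling.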
arch/powerpc/include/asm/kvm_book3s.h
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
 extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-				 struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-				   struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)

arch/powerpc/kvm/book3s_interrupts.S
@@ -96,7 +96,7 @@ kvm_start_entry:
 kvm_start_lightweight:
 
 	/* Copy registers into shadow vcpu so we can access them in real mode */
-	GET_SHADOW_VCPU(r3)
+	mr	r3, r4
 	bl	FUNC(kvmppc_copy_to_svcpu)
 	nop
 	REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
 	stw	r12, VCPU_TRAP(r3)
 
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
-	/* On 64-bit, interrupts are still off at this point */
-	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
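
On the assembly side, r4 holds the vcpu pointer at these call sites and r3 is
the first C argument register in the PPC64 ELF ABI, so mr r3, r4 simply passes
the vcpu to the helper. The point of the change, schematically (C-style
pseudocode, not actual kernel code):

	/* Before: svcpu resolved while preemptible, then used */
	svcpu = GET_SHADOW_VCPU();		/* points into CPU A's paca */
	/* ...scheduler may migrate us to CPU B here... */
	kvmppc_copy_to_svcpu(svcpu, vcpu);	/* writes CPU A's svcpu from CPU B */

	/* After: only the vcpu is passed; the helper resolves the
	 * svcpu itself inside its own preempt-off section */
	kvmppc_copy_to_svcpu(vcpu);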

arch/powerpc/kvm/book3s_pr.c
@@ -120,7 +120,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	if (svcpu->in_use) {
-		kvmppc_copy_from_svcpu(vcpu, svcpu);
+		kvmppc_copy_from_svcpu(vcpu);
 	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
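
Note that kvmppc_core_vcpu_put_pr() still holds its own svcpu reference around
this call. That is fine because svcpu_get() is built on preempt_disable(),
which nests by incrementing the preempt count, so kvmppc_copy_from_svcpu() can
safely take and drop a second reference inside the caller's window. Annotated
sketch of the call site above (comments are editorial):

	svcpu = svcpu_get(vcpu);		/* preempt count 0 -> 1 */
	if (svcpu->in_use)
		kvmppc_copy_from_svcpu(vcpu);	/* internally 1 -> 2 -> 1 */
	...
	svcpu_put(svcpu);			/* 1 -> 0, migration possible again */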
@@ -142,9 +142,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 }
 
 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-			  struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
 	svcpu->gpr[0] = vcpu->arch.gpr[0];
 	svcpu->gpr[1] = vcpu->arch.gpr[1];
 	svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -176,17 +177,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		vcpu->arch.entry_ic = mfspr(SPRN_IC);
 	svcpu->in_use = true;
+
+	svcpu_put(svcpu);
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * vcpu_put would just call us again because in_use hasn't
-	 * been updated yet.
-	 */
-	preempt_disable();
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	/*
 	 * Maybe we were already preempted and synced the svcpu from
@@ -232,7 +230,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	svcpu->in_use = false;
 
 out:
-	preempt_enable();
+	svcpu_put(svcpu);
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
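
In kvmppc_copy_from_svcpu() the explicit preempt_disable()/preempt_enable()
pair becomes svcpu_get()/svcpu_put(): the same preemption-off window, but the
pointer is now produced by the very call that opens the window, so it can no
longer be obtained separately from the protection it needs. Equivalence sketch
(editorial, not part of the diff):

	/* old */
	preempt_disable();
	/* ...copy from caller-supplied svcpu... */
	preempt_enable();

	/* new */
	svcpu = svcpu_get(vcpu);	/* disables preemption, yields pointer */
	/* ...copy... */
	svcpu_put(svcpu);		/* re-enables preemption */

The removed comment about vcpu_put re-entering the function (because in_use is
still set during the copy) described why preemption had to be off; that
protection is now implicit in svcpu_get(), so the comment went with it.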