Commit f1e89028 authored by Scott Wood, committed by Gleb Natapov

kvm/ppc/booke: Hold srcu lock when calling gfn functions

KVM core expects arch code to acquire the srcu lock when calling
gfn_to_memslot and similar functions.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 2b6398fc
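KVM publishes its memslot array under SRCU rather than a conventional lock, so gfn_to_memslot() and similar lookups are only valid inside an SRCU read-side critical section. Below is a minimal sketch of the pattern this patch applies at each call site; srcu_read_lock(), srcu_read_unlock(), and gfn_to_memslot() are the real kernel APIs, while the surrounding function lookup_example() is hypothetical, for illustration only.

#include <linux/kvm_host.h>

/*
 * Minimal sketch of the locking pattern this patch applies.
 * srcu_read_lock(), srcu_read_unlock() and gfn_to_memslot() are the
 * real kernel APIs; lookup_example() is a hypothetical caller.
 */
static void lookup_example(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int idx;

	/* Enter the SRCU read-side section that protects the memslots. */
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* The returned slot is valid only while the SRCU lock is held. */
	slot = gfn_to_memslot(vcpu->kvm, gfn);
	if (slot) {
		/* ... translate or map using the slot ... */
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}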
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int gtlb_index;
+	int idx;
 
 	gtlb_index = kvmppc_get_gpr(vcpu, ra);
 	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		return EMULATE_FAIL;
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
 		gva_t eaddr;
 		gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
 			     tlbe->word2);
...
@@ -832,6 +832,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	int r = RESUME_HOST;
 	int s;
+	int idx;
 
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1054,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
@@ -1075,6 +1078,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_account_exit(vcpu, MMIO_EXITS);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
@@ -1098,6 +1102,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
@@ -1114,6 +1120,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
...
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 	int recal = 0;
+	int idx;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
 	esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 			kvmppc_set_tlb1map_range(vcpu, gtlbe);
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
 		u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
 	return EMULATE_DONE;
 }
...