Commit 3f5ad8be authored by Paolo Bonzini

KVM: hyperv: fix locking of struct kvm_hv fields

Introduce a new mutex to avoid an AB-BA deadlock between kvm->lock and
vcpu->mutex.  Protect accesses in kvm_hv_setup_tsc_page too, as suggested
by Roman.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 83781d18
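
As background, here is a minimal userspace sketch of the AB-BA pattern the new mutex avoids: one path takes lock A then lock B, the other takes B then A, and with unlucky interleaving each thread ends up waiting on the lock the other holds. This is an illustration built on pthreads, not the KVM code paths; the lock names mirror kvm->lock and vcpu->mutex only by analogy.

/* ab-ba sketch: build with "cc -pthread"; it may intentionally hang. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* analogous to kvm->lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* analogous to vcpu->mutex */

/* Path 1: takes B, then A (a vcpu-scoped operation reaching for VM-wide state). */
static void *path_b_then_a(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock_b);
        pthread_mutex_lock(&lock_a);
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        return NULL;
}

/* Path 2: takes A, then B (a VM-wide operation touching a vcpu). */
static void *path_a_then_b(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, path_b_then_a, NULL);
        pthread_create(&t2, NULL, path_a_then_b, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("no deadlock this time\n");
        return 0;
}

The change below breaks the cycle by giving the partition-wide Hyper-V state its own mutex, kvm->arch.hyperv.hv_lock, documented to nest inside vcpu->mutex, so the Hyper-V MSR paths no longer take kvm->lock at all.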
@@ -13,8 +13,12 @@ The acquisition orders for mutexes are as follows:
 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
   them together is quite rare.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
-else is a leaf: no other lock is taken inside the critical sections.
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
 
 2: Exception
 ------------
...
@@ -704,6 +704,7 @@ struct kvm_apic_map {
 
 /* Hyper-V emulation context */
 struct kvm_hv {
+        struct mutex hv_lock;
         u64 hv_guest_os_id;
         u64 hv_hypercall;
         u64 hv_tsc_page;
...
@@ -852,6 +852,10 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                 return;
 
+        mutex_lock(&kvm->arch.hyperv.hv_lock);
+        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+                goto out_unlock;
+
         gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
         /*
          * Because the TSC parameters only vary when there is a
@@ -859,7 +863,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
          */
         if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                     &tsc_seq, sizeof(tsc_seq))))
-                return;
+                goto out_unlock;
 
         /*
          * While we're computing and writing the parameters, force the
@@ -868,15 +872,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         hv->tsc_ref.tsc_sequence = 0;
         if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                             &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-                return;
+                goto out_unlock;
 
         if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-                return;
+                goto out_unlock;
 
         /* Ensure sequence is zero before writing the rest of the struct. */
         smp_wmb();
         if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-                return;
+                goto out_unlock;
 
         /*
          * Now switch to the TSC page mechanism by writing the sequence.
@@ -891,6 +895,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         hv->tsc_ref.tsc_sequence = tsc_seq;
         kvm_write_guest(kvm, gfn_to_gpa(gfn),
                         &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+out_unlock:
+        mutex_unlock(&kvm->arch.hyperv.hv_lock);
 }
 
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
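
The kvm_hv_setup_tsc_page() hunks above follow a check / lock / re-check idiom: the TSC-page enable bit is tested locklessly for the fast path, tested again once hv_lock is held, and every early return inside the critical section becomes a goto out_unlock so the lock is always dropped. A compact userspace sketch of the same shape, using made-up names (hv_state, tsc_page_enabled, sequence) rather than the kernel structures:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct hv_state {
        pthread_mutex_t lock;          /* stands in for kvm->arch.hyperv.hv_lock */
        atomic_bool tsc_page_enabled;  /* stands in for the ENABLE bit in hv_tsc_page */
        unsigned long long sequence;   /* stands in for hv->tsc_ref */
};

void update_tsc_page(struct hv_state *hv, unsigned long long seq)
{
        /* Fast path: nothing to do, and no lock taken. */
        if (!atomic_load(&hv->tsc_page_enabled))
                return;

        pthread_mutex_lock(&hv->lock);
        /* Re-check under the lock: a concurrent writer may have cleared the
         * bit between the first test and the lock acquisition. */
        if (!atomic_load(&hv->tsc_page_enabled))
                goto out_unlock;

        hv->sequence = seq;            /* the actual update, now race-free */
out_unlock:
        pthread_mutex_unlock(&hv->lock);
}

static struct hv_state hv = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
};

int main(void)
{
        atomic_store(&hv.tsc_page_enabled, true);
        update_tsc_page(&hv, 1);
        return 0;
}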
@@ -1142,9 +1148,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
         if (kvm_hv_msr_partition_wide(msr)) {
                 int r;
 
-                mutex_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-                mutex_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                 return r;
         } else
                 return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1155,9 +1161,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         if (kvm_hv_msr_partition_wide(msr)) {
                 int r;
 
-                mutex_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                 r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
-                mutex_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                 return r;
         } else
                 return kvm_hv_get_msr(vcpu, msr, pdata);
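
The two MSR hunks above are the core of the fix: only partition-wide Hyper-V MSRs need VM-wide serialization, and they now serialize on the dedicated hv_lock instead of kvm->lock, while per-vcpu MSRs are handled with no VM-wide lock at all. A rough userspace sketch of that dispatch, with hypothetical stand-ins (msr_is_partition_wide, set_msr_pw, set_msr_vcpu) for the kernel helpers:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_mutex_t hv_lock = PTHREAD_MUTEX_INITIALIZER; /* guards partition-wide state only */

/* Hypothetical stand-ins for kvm_hv_msr_partition_wide() / kvm_hv_set_msr_pw() / kvm_hv_set_msr(). */
static bool msr_is_partition_wide(uint32_t msr)      { return (msr & 0xff) < 0x10; } /* made-up split */
static int  set_msr_pw(uint32_t msr, uint64_t data)  { (void)msr; (void)data; return 0; }
static int  set_msr_vcpu(uint32_t msr, uint64_t data){ (void)msr; (void)data; return 0; }

int hv_set_msr_common(uint32_t msr, uint64_t data)
{
        if (msr_is_partition_wide(msr)) {
                int r;

                /* Partition-wide state: serialize on the narrow hv_lock;
                 * the VM-wide kvm->lock is no longer involved on this path. */
                pthread_mutex_lock(&hv_lock);
                r = set_msr_pw(msr, data);
                pthread_mutex_unlock(&hv_lock);
                return r;
        }
        /* Per-vcpu MSRs touch only per-vcpu state: no VM-wide lock needed. */
        return set_msr_vcpu(msr, data);
}

int main(void)
{
        return hv_set_msr_common(0x05, 1);
}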
@@ -1165,7 +1171,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
...
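
One more detail from the hyperv.c changes: kvm_hv_hypercall_enabled() reads hv_hypercall with no lock held, while writers now update it under hv_lock, so the lockless read is wrapped in READ_ONCE() to make the racy access explicit and keep the compiler from re-loading or tearing it. In portable C11 terms the pattern is roughly a relaxed atomic load paired with stores performed under a mutex; a small illustrative mapping (this is not the kernel's READ_ONCE() macro, and the flag name is made up):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HYPERCALL_ENABLE (1ULL << 0)   /* illustrative flag bit */

static pthread_mutex_t hv_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic uint64_t hv_hypercall;  /* written under hv_lock, read locklessly */

/* Writer side: serialized on hv_lock, like the partition-wide MSR path. */
void set_hypercall_msr(uint64_t data)
{
        pthread_mutex_lock(&hv_lock);
        atomic_store_explicit(&hv_hypercall, data, memory_order_relaxed);
        pthread_mutex_unlock(&hv_lock);
}

/* Reader side: no lock, a single well-defined load, like READ_ONCE(). */
bool hypercall_enabled(void)
{
        return atomic_load_explicit(&hv_hypercall, memory_order_relaxed) &
               HYPERCALL_ENABLE;
}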
@@ -7881,6 +7881,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
         mutex_init(&kvm->arch.apic_map_lock);
+        mutex_init(&kvm->arch.hyperv.hv_lock);
         spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
         kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
...