Commit f2bc14b6 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyper-v: Prepare to meet unallocated Hyper-V context

Currently, Hyper-V context is part of 'struct kvm_vcpu_arch' and is always
available. In preparation for allocating it dynamically, check that it is
not NULL at call sites which can normally proceed without it, i.e. where the
behavior is identical to the situation when Hyper-V emulation is not being
used by the guest.
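
As an illustration, here is a minimal sketch of that NULL-check pattern, abbreviated from the kvm_hv_process_stimers() hunk below (the elided body is unchanged):

	/* Abbreviated sketch: bail out early when no Hyper-V context has been
	 * allocated for this vCPU, matching a guest that never used Hyper-V
	 * emulation.
	 */
	void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

		if (!hv_vcpu)
			return;

		/* ... process the synthetic timers as before ... */
	}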

When Hyper-V context for a particular vCPU is not allocated, we may still
need to get 'vp_index' from there. E.g. in a hypothetical situation where
Hyper-V emulation was enabled on one vCPU but not on another, a Hyper-V
style send-IPI hypercall may still be used. Luckily, vp_index is always
initialized to kvm_vcpu_get_idx() and can only be changed when Hyper-V
context is present. Introduce the kvm_hv_get_vpindex() helper to simplify
this.
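
For reference, the helper as it is added in the header hunk below:

	static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

		/* vp_index defaults to the vCPU index until Hyper-V context exists */
		return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
	}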

No functional change intended.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-12-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9ff5e030
@@ -142,10 +142,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
 		return NULL;
 
 	vcpu = kvm_get_vcpu(kvm, vpidx);
-	if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx)
+	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
 		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		if (to_hv_vcpu(vcpu)->vp_index == vpidx)
+		if (kvm_hv_get_vpindex(vcpu) == vpidx)
 			return vcpu;
 	return NULL;
 }

@@ -377,9 +377,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 		break;
 	}
 
-	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
-				    to_hv_vcpu(vcpu)->vp_index, msr,
-				    *pdata);
+	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
 
 	return 0;
 }

@@ -806,6 +804,9 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
 	u64 time_now, exp_time;
 	int i;
 
+	if (!hv_vcpu)
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
 			stimer = &hv_vcpu->stimer[i];

@@ -842,6 +843,9 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+	if (!hv_vcpu)
+		return false;
+
 	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
 		return false;
 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;

@@ -1504,8 +1508,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
 	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_bit(to_hv_vcpu(vcpu)->vp_index,
-			     (unsigned long *)vp_bitmap))
+		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
 			__set_bit(i, vcpu_bitmap);
 	}
 	return vcpu_bitmap;

@@ -83,6 +83,13 @@ static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
 	return &vcpu->kvm->arch.hyperv.hv_syndbg;
 }
 
+static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+	return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+}
+
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

@@ -121,6 +128,9 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+	if (!hv_vcpu)
+		return false;
+
 	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
 			     HV_SYNIC_STIMER_COUNT);
 }

@@ -1245,7 +1245,8 @@ static int apic_set_eoi(struct kvm_lapic *apic)
 	apic_clear_isr(vector, apic);
 	apic_update_ppr(apic);
 
-	if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
+	if (to_hv_vcpu(apic->vcpu) &&
+	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
 		kvm_hv_synic_send_eoi(apic->vcpu, vector);
 
 	kvm_ioapic_send_eoi(apic, vector);

@@ -2512,7 +2513,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 	 */
 	apic_clear_irr(vector, apic);
 
-	if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
+	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
 		/*
 		 * For auto-EOI interrupts, there might be another pending
 		 * interrupt above PPR, so check whether to raise another

@@ -6810,12 +6810,10 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/* All fields are clean at this point */
 	if (static_branch_unlikely(&enable_evmcs)) {
-		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
-
 		current_evmcs->hv_clean_fields |=
 			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
-		current_evmcs->hv_vp_id = hv_vcpu->vp_index;
+		current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
 	}
 
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */

@@ -8803,8 +8803,11 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
 		return;
 
-	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
-		  to_hv_synic(vcpu)->vec_bitmap, 256);
+	if (to_hv_vcpu(vcpu))
+		bitmap_or((ulong *)eoi_exit_bitmap,
+			  vcpu->arch.ioapic_handled_vectors,
+			  to_hv_synic(vcpu)->vec_bitmap, 256);
+
 	static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 }