Commit b4f69df0 authored by Vitaly Kuznetsov, committed by Sean Christopherson

KVM: x86: Make Hyper-V emulation optional

Hyper-V emulation in KVM is a fairly big chunk of code, and in some cases it
may be desirable not to compile it in, to reduce both module size and attack
surface. Introduce a CONFIG_KVM_HYPERV option to make this possible.
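
(Illustration only, not part of the patch: once the option exists, a kernel
can be configured without the Hyper-V emulation code with

    scripts/config --disable KVM_HYPERV
    make olddefconfig

which is equivalent to setting "# CONFIG_KVM_HYPERV is not set" in .config
before building.)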

Note, there's room for further nVMX/nSVM code optimizations when
!CONFIG_KVM_HYPERV; this will be done in follow-up patches.

Reorganize the Makefile a bit so that all CONFIG_HYPERV and CONFIG_KVM_HYPERV
files are grouped together.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
Link: https://lore.kernel.org/r/20231205103630.1391318-13-vkuznets@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent f9731462
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -937,8 +937,10 @@ struct kvm_vcpu_arch {
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
 
+#ifdef CONFIG_KVM_HYPERV
 	bool hyperv_enabled;
 	struct kvm_vcpu_hv *hyperv;
+#endif
 
 #ifdef CONFIG_KVM_XEN
 	struct kvm_vcpu_xen xen;
 #endif
@@ -1095,6 +1097,7 @@ enum hv_tsc_page_status {
 	HV_TSC_PAGE_BROKEN,
 };
 
+#ifdef CONFIG_KVM_HYPERV
 /* Hyper-V emulation context */
 struct kvm_hv {
 	struct mutex hv_lock;
@@ -1127,6 +1130,7 @@ struct kvm_hv {
 	struct kvm_hv_syndbg hv_syndbg;
 };
+#endif
 
 struct msr_bitmap_range {
 	u32 flags;
@@ -1349,7 +1353,9 @@ struct kvm_arch {
 	/* reads protected by irq_srcu, writes by irq_lock */
 	struct hlist_head mask_notifier_list;
 
+#ifdef CONFIG_KVM_HYPERV
 	struct kvm_hv hyperv;
+#endif
 
 #ifdef CONFIG_KVM_XEN
 	struct kvm_xen xen;
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -141,6 +141,20 @@ config KVM_SMM
 
 	  If unsure, say Y.
 
+config KVM_HYPERV
+	bool "Support for Microsoft Hyper-V emulation"
+	depends on KVM
+	default y
+	help
+	  Provides KVM support for emulating Microsoft Hyper-V. This allows KVM
+	  to expose a subset of the paravirtualized interfaces defined in the
+	  Hyper-V Hypervisor Top-Level Functional Specification (TLFS):
+	  https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+	  These interfaces are required for the correct and performant functioning
+	  of Windows and Hyper-V guests on KVM.
+
+	  If unsure, say "Y".
+
 config KVM_XEN
 	bool "Support for Xen hypercall interface"
 	depends on KVM
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -11,29 +11,27 @@ include $(srctree)/virt/kvm/Makefile.kvm
 
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
 			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
-			   hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
+			   debugfs.o mmu/mmu.o mmu/page_track.o \
 			   mmu/spte.o
 
-ifdef CONFIG_HYPERV
-kvm-y			+= kvm_onhyperv.o
-endif
-
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
+kvm-$(CONFIG_KVM_HYPERV) += hyperv.o
 kvm-$(CONFIG_KVM_XEN)	+= xen.o
 kvm-$(CONFIG_KVM_SMM)	+= smm.o
 
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
-			   vmx/hyperv.o vmx/hyperv_evmcs.o vmx/nested.o vmx/posted_intr.o
-kvm-intel-$(CONFIG_X86_SGX_KVM)	+= vmx/sgx.o
+			   vmx/nested.o vmx/posted_intr.o
 
-ifdef CONFIG_HYPERV
-kvm-intel-y		+= vmx/vmx_onhyperv.o
-endif
+kvm-intel-$(CONFIG_X86_SGX_KVM)	+= vmx/sgx.o
+kvm-intel-$(CONFIG_KVM_HYPERV)	+= vmx/hyperv.o vmx/hyperv_evmcs.o
 
 kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
-			   svm/sev.o svm/hyperv.o
+			   svm/sev.o
+kvm-amd-$(CONFIG_KVM_HYPERV)	+= svm/hyperv.o
 
 ifdef CONFIG_HYPERV
+kvm-y			+= kvm_onhyperv.o
+kvm-intel-y		+= vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o
 kvm-amd-y		+= svm/svm_onhyperv.o
 endif
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -314,11 +314,15 @@ EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
 {
+#ifdef CONFIG_KVM_HYPERV
 	struct kvm_cpuid_entry2 *entry;
 
 	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
 				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
+#else
+	return false;
+#endif
 }
 
 static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
@@ -433,11 +437,13 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
 		return 0;
 	}
 
+#ifdef CONFIG_KVM_HYPERV
 	if (kvm_cpuid_has_hyperv(e2, nent)) {
 		r = kvm_hv_vcpu_init(vcpu);
 		if (r)
 			return r;
 	}
+#endif
 
 	r = kvm_check_cpuid(vcpu, e2, nent);
 	if (r)
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -24,6 +24,8 @@
 #include <linux/kvm_host.h>
 #include "x86.h"
 
+#ifdef CONFIG_KVM_HYPERV
+
 /* "Hv#1" signature */
 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
 
@@ -261,5 +263,62 @@ static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
 }
 
 int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
+#else /* CONFIG_KVM_HYPERV */
+static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
+					 struct pvclock_vcpu_time_info *hv_clock) {}
+static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
+static inline void kvm_hv_init_vm(struct kvm *kvm) {}
+static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
+static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+	return HV_STATUS_ACCESS_DENIED;
+}
+static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
+static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
+static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
+{
+	return false;
+}
+static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
+{
+	return false;
+}
+static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
+static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
+static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
+{
+	return vcpu->vcpu_idx;
+}
+static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
+#endif /* CONFIG_KVM_HYPERV */
 
-#endif
+#endif /* __ARCH_X86_KVM_HYPERV_H__ */
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -144,7 +144,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
 }
 
-
+#ifdef CONFIG_KVM_HYPERV
 static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
			    struct kvm *kvm, int irq_source_id, int level,
			    bool line_status)
@@ -154,6 +154,7 @@ static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
 }
+#endif
 
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			       struct kvm *kvm, int irq_source_id, int level,
@@ -163,9 +164,11 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 	int r;
 
 	switch (e->type) {
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_IRQ_ROUTING_HV_SINT:
 		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
 				       line_status);
+#endif
 
 	case KVM_IRQ_ROUTING_MSI:
 		if (kvm_msi_route_invalid(kvm, e))
@@ -314,11 +317,13 @@ int kvm_set_routing_entry(struct kvm *kvm,
 		if (kvm_msi_route_invalid(kvm, e))
 			return -EINVAL;
 		break;
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_IRQ_ROUTING_HV_SINT:
 		e->set = kvm_hv_set_sint;
 		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
 		e->hv_sint.sint = ue->u.hv_sint.sint;
 		break;
+#endif
 #ifdef CONFIG_KVM_XEN
 	case KVM_IRQ_ROUTING_XEN_EVTCHN:
 		return kvm_xen_setup_evtchn(kvm, e, ue);
@@ -438,5 +443,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 
 void kvm_arch_irq_routing_update(struct kvm *kvm)
 {
+#ifdef CONFIG_KVM_HYPERV
 	kvm_hv_irq_routing_update(kvm);
+#endif
 }
--- a/arch/x86/kvm/svm/hyperv.h
+++ b/arch/x86/kvm/svm/hyperv.h
@@ -11,6 +11,7 @@
 #include "../hyperv.h"
 #include "svm.h"
 
+#ifdef CONFIG_KVM_HYPERV
 static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -41,5 +42,13 @@ static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
 }
 
 void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#else /* CONFIG_KVM_HYPERV */
+static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu) {}
+static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+static inline void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_KVM_HYPERV */
 
 #endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */
--- a/arch/x86/kvm/vmx/hyperv.h
+++ b/arch/x86/kvm/vmx/hyperv.h
@@ -9,11 +9,6 @@
 #define EVMPTR_INVALID (-1ULL)
 #define EVMPTR_MAP_PENDING (-2ULL)
 
-static inline bool evmptr_is_valid(u64 evmptr)
-{
-	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
-}
-
 enum nested_evmptrld_status {
 	EVMPTRLD_DISABLED,
 	EVMPTRLD_SUCCEEDED,
@@ -21,6 +16,12 @@ enum nested_evmptrld_status {
 	EVMPTRLD_ERROR,
 };
 
+#ifdef CONFIG_KVM_HYPERV
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
+}
+
 static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu)
 {
 	/*
@@ -39,5 +40,11 @@ void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *
 int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
 bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
 void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#else
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+	return false;
+}
+#endif
 
 #endif /* __KVM_X86_VMX_HYPERV_H */
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -226,6 +226,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 
 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_HYPERV
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -241,10 +242,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
 		hv_vcpu->nested.vm_id = 0;
 		hv_vcpu->nested.vp_id = 0;
 	}
+#endif
 }
 
 static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
 {
+#ifdef CONFIG_KVM_HYPERV
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
 	 * When Enlightened VMEntry is enabled on the calling CPU we treat
@@ -264,6 +267,9 @@ static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
 		nested_release_evmcs(vcpu);
 
 	return true;
+#else
+	return false;
+#endif
 }
 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -1595,6 +1601,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 
 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
 {
+#ifdef CONFIG_KVM_HYPERV
 	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
 	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
@@ -1835,10 +1842,14 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
 	 */
 
 	return;
+#else /* CONFIG_KVM_HYPERV */
+	KVM_BUG_ON(1, vmx->vcpu.kvm);
+#endif /* CONFIG_KVM_HYPERV */
 }
 
 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 {
+#ifdef CONFIG_KVM_HYPERV
 	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
 	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
 
@@ -2009,6 +2020,9 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
 
 	return;
+#else /* CONFIG_KVM_HYPERV */
+	KVM_BUG_ON(1, vmx->vcpu.kvm);
+#endif /* CONFIG_KVM_HYPERV */
 }
 
 /*
@@ -2018,6 +2032,7 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
 	struct kvm_vcpu *vcpu, bool from_launch)
 {
+#ifdef CONFIG_KVM_HYPERV
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	bool evmcs_gpa_changed = false;
 	u64 evmcs_gpa;
@@ -2099,6 +2114,9 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
 	}
 
 	return EVMPTRLD_SUCCEEDED;
+#else
+	return EVMPTRLD_DISABLED;
+#endif
 }
 
 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
@@ -2905,8 +2923,10 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
 	    nested_check_vm_entry_controls(vcpu, vmcs12))
 		return -EINVAL;
 
+#ifdef CONFIG_KVM_HYPERV
 	if (guest_cpuid_has_evmcs(vcpu))
 		return nested_evmcs_check_controls(vmcs12);
+#endif
 
 	return 0;
 }
@@ -3178,6 +3198,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+#ifdef CONFIG_KVM_HYPERV
 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3205,6 +3226,7 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 
 	return true;
 }
+#endif
 
 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
@@ -3296,6 +3318,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_HYPERV
 	/*
 	 * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy
 	 * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory
@@ -3312,6 +3335,7 @@ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 		return false;
 	}
+#endif
 
 	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
 		return false;
@@ -4749,6 +4773,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 	/* trying to cancel vmlaunch/vmresume is a bug */
 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
+#ifdef CONFIG_KVM_HYPERV
 	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
 		/*
 		 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
@@ -4758,6 +4783,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 		 */
 		(void)nested_get_evmcs_page(vcpu);
 	}
+#endif
 
 	/* Service pending TLB flush requests for L2 before switching to L1. */
 	kvm_service_local_tlb_flush_requests(vcpu);
@@ -6212,11 +6238,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
 		 * Handle L2's bus locks in L0 directly.
 		 */
 		return true;
+#ifdef CONFIG_KVM_HYPERV
 	case EXIT_REASON_VMCALL:
 		/* Hyper-V L2 TLB flush hypercall is handled by L0 */
 		return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
 			nested_evmcs_l2_tlb_flush_enabled(vcpu) &&
 			kvm_hv_is_tlb_flush_hcall(vcpu);
+#endif
 	default:
 		break;
 	}
@@ -7100,7 +7128,9 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 	.set_state = vmx_set_nested_state,
 	.get_nested_state_pages = vmx_get_nested_state_pages,
 	.write_log_dirty = nested_vmx_write_pml_buffer,
+#ifdef CONFIG_KVM_HYPERV
 	.enable_evmcs = nested_enable_evmcs,
 	.get_evmcs_version = nested_get_evmcs_version,
 	.hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush,
+#endif
 };
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2048,6 +2048,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
 				    &msr_info->data))
 			return 1;
+#ifdef CONFIG_KVM_HYPERV
 		/*
 		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
 		 * instead of just ignoring the features, different Hyper-V
@@ -2058,6 +2059,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
 			nested_evmcs_filter_control_msr(vcpu, msr_info->index,
 							&msr_info->data);
+#endif
 		break;
 	case MSR_IA32_RTIT_CTL:
 		if (!vmx_pt_mode_is_host_guest())
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1504,6 +1504,8 @@ static unsigned num_msrs_to_save;
 static const u32 emulated_msrs_all[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+
+#ifdef CONFIG_KVM_HYPERV
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
 	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
 	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
@@ -1521,6 +1523,7 @@ static const u32 emulated_msrs_all[] = {
 	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
 	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
 	HV_X64_MSR_SYNDBG_PENDING_BUFFER,
+#endif
 
 	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
 	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
@@ -4020,6 +4023,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * the need to ignore the workaround.
 		 */
 		break;
+#ifdef CONFIG_KVM_HYPERV
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
 	case HV_X64_MSR_SYNDBG_OPTIONS:
@@ -4032,6 +4036,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
 		return kvm_hv_set_msr_common(vcpu, msr, data,
 					     msr_info->host_initiated);
+#endif
 	case MSR_IA32_BBL_CR_CTL3:
 		/* Drop writes to this legacy MSR -- see rdmsr
 		 * counterpart for further detail.
@@ -4377,6 +4382,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 */
 		msr_info->data = 0x20000000;
 		break;
+#ifdef CONFIG_KVM_HYPERV
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
 	case HV_X64_MSR_SYNDBG_OPTIONS:
@@ -4390,6 +4396,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return kvm_hv_get_msr_common(vcpu,
 					     msr_info->index, &msr_info->data,
 					     msr_info->host_initiated);
+#endif
 	case MSR_IA32_BBL_CR_CTL3:
 		/* This legacy MSR exists but isn't fully documented in current
 		 * silicon.  It is however accessed by winxp in very narrow
@@ -4527,6 +4534,7 @@ static inline bool kvm_can_mwait_in_guest(void)
 		boot_cpu_has(X86_FEATURE_ARAT);
 }
 
+#ifdef CONFIG_KVM_HYPERV
 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
 					    struct kvm_cpuid2 __user *cpuid_arg)
 {
@@ -4547,6 +4555,7 @@ static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
 
 	return 0;
 }
+#endif
 
 static bool kvm_is_vm_type_supported(unsigned long type)
 {
@@ -4580,9 +4589,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_PIT_STATE2:
 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
 	case KVM_CAP_VCPU_EVENTS:
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_CAP_HYPERV:
 	case KVM_CAP_HYPERV_VAPIC:
 	case KVM_CAP_HYPERV_SPIN:
+	case KVM_CAP_HYPERV_TIME:
 	case KVM_CAP_HYPERV_SYNIC:
 	case KVM_CAP_HYPERV_SYNIC2:
 	case KVM_CAP_HYPERV_VP_INDEX:
@@ -4592,6 +4603,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_HYPERV_CPUID:
 	case KVM_CAP_HYPERV_ENFORCE_CPUID:
 	case KVM_CAP_SYS_HYPERV_CPUID:
+#endif
 	case KVM_CAP_PCI_SEGMENT:
 	case KVM_CAP_DEBUGREGS:
 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -4601,7 +4613,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_GET_TSC_KHZ:
 	case KVM_CAP_KVMCLOCK_CTRL:
 	case KVM_CAP_READONLY_MEM:
-	case KVM_CAP_HYPERV_TIME:
 	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
 	case KVM_CAP_TSC_DEADLINE_TIMER:
 	case KVM_CAP_DISABLE_QUIRKS:
@@ -4712,12 +4723,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = kvm_x86_ops.nested_ops->get_state ?
 			kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
 		break;
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
 		r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
 		break;
 	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
 		r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
 		break;
+#endif
 	case KVM_CAP_SMALLER_MAXPHYADDR:
 		r = (int) allow_smaller_maxphyaddr;
 		break;
@@ -4884,9 +4897,11 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	case KVM_GET_MSRS:
 		r = msr_io(NULL, argp, do_get_msr_feature, 1);
 		break;
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_GET_SUPPORTED_HV_CPUID:
 		r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
 		break;
+#endif
 	case KVM_GET_DEVICE_ATTR: {
 		struct kvm_device_attr attr;
 		r = -EFAULT;
@@ -5712,14 +5727,11 @@ static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 				     struct kvm_enable_cap *cap)
 {
-	int r;
-	uint16_t vmcs_version;
-	void __user *user_ptr;
-
 	if (cap->flags)
 		return -EINVAL;
 
 	switch (cap->cap) {
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_CAP_HYPERV_SYNIC2:
 		if (cap->args[0])
 			return -EINVAL;
@@ -5731,6 +5743,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		return kvm_hv_activate_synic(vcpu, cap->cap ==
 					     KVM_CAP_HYPERV_SYNIC2);
 	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
+	{
+		int r;
+		uint16_t vmcs_version;
+		void __user *user_ptr;
+
 		if (!kvm_x86_ops.nested_ops->enable_evmcs)
 			return -ENOTTY;
 		r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
@@ -5741,6 +5758,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 			r = -EFAULT;
 		}
 		return r;
+	}
 	case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
 		if (!kvm_x86_ops.enable_l2_tlb_flush)
 			return -ENOTTY;
@@ -5749,6 +5767,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
 	case KVM_CAP_HYPERV_ENFORCE_CPUID:
 		return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
+#endif
 
 	case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
 		vcpu->arch.pv_cpuid.enforce = cap->args[0];
srcu_read_unlock(&vcpu->kvm->srcu, idx); srcu_read_unlock(&vcpu->kvm->srcu, idx);
break; break;
} }
#ifdef CONFIG_KVM_HYPERV
case KVM_GET_SUPPORTED_HV_CPUID: case KVM_GET_SUPPORTED_HV_CPUID:
r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
break; break;
#endif
#ifdef CONFIG_KVM_XEN #ifdef CONFIG_KVM_XEN
case KVM_XEN_VCPU_GET_ATTR: { case KVM_XEN_VCPU_GET_ATTR: {
struct kvm_xen_vcpu_attr xva; struct kvm_xen_vcpu_attr xva;
@@ -7201,6 +7222,7 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 		r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
 		break;
 	}
+#ifdef CONFIG_KVM_HYPERV
 	case KVM_HYPERV_EVENTFD: {
 		struct kvm_hyperv_eventfd hvevfd;
 
@@ -7210,6 +7232,7 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 		r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
 		break;
 	}
+#endif
 	case KVM_SET_PMU_EVENT_FILTER:
 		r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
 		break;
@@ -10588,19 +10611,20 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 
 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 {
-	u64 eoi_exit_bitmap[4];
-
 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
 		return;
 
+#ifdef CONFIG_KVM_HYPERV
 	if (to_hv_vcpu(vcpu)) {
+		u64 eoi_exit_bitmap[4];
 		bitmap_or((ulong *)eoi_exit_bitmap,
 			  vcpu->arch.ioapic_handled_vectors,
 			  to_hv_synic(vcpu)->vec_bitmap, 256);
 		static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 		return;
 	}
+#endif
 
 	static_call_cond(kvm_x86_load_eoi_exitmap)(
 		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
@@ -10691,9 +10715,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 * the flushes are considered "remote" and not "local" because
 		 * the requests can be initiated from other vCPUs.
 		 */
+#ifdef CONFIG_KVM_HYPERV
 		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
 		    kvm_hv_vcpu_flush_tlb(vcpu))
 			kvm_vcpu_flush_tlb_guest(vcpu);
+#endif
 
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -10746,6 +10772,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			vcpu_load_eoi_exitmap(vcpu);
 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
 			kvm_vcpu_reload_apic_access_page(vcpu);
+#ifdef CONFIG_KVM_HYPERV
 		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
@@ -10776,6 +10803,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 */
 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
 			kvm_hv_process_stimers(vcpu);
+#endif
 		if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
 			kvm_vcpu_update_apicv(vcpu);
 		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))