Commit 0617a769 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Rename virtualization {en,dis}abling APIs to match common KVM

Rename x86's per-CPU vendor hooks used to enable virtualization in
hardware to align with the recently renamed arch hooks.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Message-ID: <20240830043600.127750-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5381eca1
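For orientation: the renamed members are plain function pointers in the per-vendor ops table, dispatched by the common arch hooks that were renamed earlier in this series. Below is a minimal, self-contained user-space sketch of that shape; the hook names come from the diff, while the struct layout, the fake_* vendor stubs, and the direct pointer call (standing in for kvm_x86_call()'s static-call plumbing) are simplified illustrations, not the real kernel code.

#include <stdio.h>

/* Simplified stand-in for struct kvm_x86_ops: only the two renamed hooks. */
struct kvm_x86_ops {
	int  (*enable_virtualization_cpu)(void);   /* was hardware_enable */
	void (*disable_virtualization_cpu)(void);  /* was hardware_disable */
};

/* Toy vendor implementation, standing in for the svm_*/vmx_* callbacks. */
static int fake_enable_virtualization_cpu(void)
{
	printf("virtualization enabled on this CPU\n");
	return 0;
}

static void fake_disable_virtualization_cpu(void)
{
	printf("virtualization disabled on this CPU\n");
}

static struct kvm_x86_ops kvm_x86_ops = {
	.enable_virtualization_cpu  = fake_enable_virtualization_cpu,
	.disable_virtualization_cpu = fake_disable_virtualization_cpu,
};

/*
 * The common arch hooks dispatch to the vendor hook of the same name; in
 * the kernel this goes through kvm_x86_call() rather than a raw pointer.
 */
static int kvm_arch_enable_virtualization_cpu(void)
{
	return kvm_x86_ops.enable_virtualization_cpu();
}

static void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_x86_ops.disable_virtualization_cpu();
}

int main(void)
{
	if (kvm_arch_enable_virtualization_cpu())
		return 1;
	kvm_arch_disable_virtualization_cpu();
	return 0;
}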
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -14,8 +14,8 @@ BUILD_BUG_ON(1)
  * be __static_call_return0.
  */
 KVM_X86_OP(check_processor_compatibility)
-KVM_X86_OP(hardware_enable)
-KVM_X86_OP(hardware_disable)
+KVM_X86_OP(enable_virtualization_cpu)
+KVM_X86_OP(disable_virtualization_cpu)
 KVM_X86_OP(hardware_unsetup)
 KVM_X86_OP(has_emulated_msr)
 KVM_X86_OP(vcpu_after_set_cpuid)
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1629,8 +1629,8 @@ struct kvm_x86_ops {
 
 	int (*check_processor_compatibility)(void);
 
-	int (*hardware_enable)(void);
-	void (*hardware_disable)(void);
+	int (*enable_virtualization_cpu)(void);
+	void (*disable_virtualization_cpu)(void);
 	void (*hardware_unsetup)(void);
 	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
 	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -592,14 +592,14 @@ static inline void kvm_cpu_svm_disable(void)
 	}
 }
 
-static void svm_emergency_disable(void)
+static void svm_emergency_disable_virtualization_cpu(void)
 {
 	kvm_rebooting = true;
 
 	kvm_cpu_svm_disable();
 }
 
-static void svm_hardware_disable(void)
+static void svm_disable_virtualization_cpu(void)
 {
 	/* Make sure we clean up behind us */
 	if (tsc_scaling)
@@ -610,7 +610,7 @@ static void svm_hardware_disable(void)
 	amd_pmu_disable_virt();
 }
 
-static int svm_hardware_enable(void)
+static int svm_enable_virtualization_cpu(void)
 {
 	struct svm_cpu_data *sd;
 	uint64_t efer;
@@ -1533,7 +1533,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
 	 * available. The user return MSR support is not required in this case
 	 * because TSC_AUX is restored on #VMEXIT from the host save area
-	 * (which has been initialized in svm_hardware_enable()).
+	 * (which has been initialized in svm_enable_virtualization_cpu()).
 	 */
 	if (likely(tsc_aux_uret_slot >= 0) &&
 	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
@@ -3132,7 +3132,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 * feature is available. The user return MSR support is not
 		 * required in this case because TSC_AUX is restored on #VMEXIT
 		 * from the host save area (which has been initialized in
-		 * svm_hardware_enable()).
+		 * svm_enable_virtualization_cpu()).
 		 */
 		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
 			break;
@@ -4980,8 +4980,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.check_processor_compatibility = svm_check_processor_compat,
 
 	.hardware_unsetup = svm_hardware_unsetup,
-	.hardware_enable = svm_hardware_enable,
-	.hardware_disable = svm_hardware_disable,
+	.enable_virtualization_cpu = svm_enable_virtualization_cpu,
+	.disable_virtualization_cpu = svm_disable_virtualization_cpu,
 	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_vcpu_create,
@@ -5411,7 +5411,7 @@ static void __svm_exit(void)
 {
 	kvm_x86_vendor_exit();
 
-	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+	cpu_emergency_unregister_virt_callback(svm_emergency_disable_virtualization_cpu);
 }
 
 static int __init svm_init(void)
@@ -5427,7 +5427,7 @@ static int __init svm_init(void)
 	if (r)
 		return r;
 
-	cpu_emergency_register_virt_callback(svm_emergency_disable);
+	cpu_emergency_register_virt_callback(svm_emergency_disable_virtualization_cpu);
 
 	/*
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -23,8 +23,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 
 	.hardware_unsetup = vmx_hardware_unsetup,
 
-	.hardware_enable = vmx_hardware_enable,
-	.hardware_disable = vmx_hardware_disable,
+	.enable_virtualization_cpu = vmx_enable_virtualization_cpu,
+	.disable_virtualization_cpu = vmx_disable_virtualization_cpu,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vm_size = sizeof(struct kvm_vmx),
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -755,7 +755,7 @@ static int kvm_cpu_vmxoff(void)
 	return -EIO;
 }
 
-static void vmx_emergency_disable(void)
+static void vmx_emergency_disable_virtualization_cpu(void)
 {
 	int cpu = raw_smp_processor_id();
 	struct loaded_vmcs *v;
@@ -2844,7 +2844,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
 	return -EFAULT;
 }
 
-int vmx_hardware_enable(void)
+int vmx_enable_virtualization_cpu(void)
 {
 	int cpu = raw_smp_processor_id();
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2881,7 +2881,7 @@ static void vmclear_local_loaded_vmcss(void)
 		__loaded_vmcs_clear(v);
 }
 
-void vmx_hardware_disable(void)
+void vmx_disable_virtualization_cpu(void)
 {
 	vmclear_local_loaded_vmcss();
 
@@ -8584,7 +8584,7 @@ static void __vmx_exit(void)
 {
 	allow_smaller_maxphyaddr = false;
 
-	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
+	cpu_emergency_unregister_virt_callback(vmx_emergency_disable_virtualization_cpu);
 
 	vmx_cleanup_l1d_flush();
 }
@@ -8632,7 +8632,7 @@ static int __init vmx_init(void)
 		pi_init_cpu(cpu);
 	}
 
-	cpu_emergency_register_virt_callback(vmx_emergency_disable);
+	cpu_emergency_register_virt_callback(vmx_emergency_disable_virtualization_cpu);
 
 	vmx_check_vmcs12_offsets();
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -13,8 +13,8 @@ extern struct kvm_x86_init_ops vt_init_ops __initdata;
 
 void vmx_hardware_unsetup(void);
 int vmx_check_processor_compat(void);
-int vmx_hardware_enable(void);
-void vmx_hardware_disable(void);
+int vmx_enable_virtualization_cpu(void);
+void vmx_disable_virtualization_cpu(void);
 int vmx_vm_init(struct kvm *kvm);
 void vmx_vm_destroy(struct kvm *kvm);
 int vmx_vcpu_precreate(struct kvm *kvm);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9749,7 +9749,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 
 	guard(mutex)(&vendor_module_lock);
 
-	if (kvm_x86_ops.hardware_enable) {
+	if (kvm_x86_ops.enable_virtualization_cpu) {
 		pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
 		return -EEXIST;
 	}
@@ -9876,7 +9876,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	return 0;
 
 out_unwind_ops:
-	kvm_x86_ops.hardware_enable = NULL;
+	kvm_x86_ops.enable_virtualization_cpu = NULL;
 	kvm_x86_call(hardware_unsetup)();
 out_mmu_exit:
 	kvm_mmu_vendor_module_exit();
@@ -9917,7 +9917,7 @@ void kvm_x86_vendor_exit(void)
 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 #endif
 	mutex_lock(&vendor_module_lock);
-	kvm_x86_ops.hardware_enable = NULL;
+	kvm_x86_ops.enable_virtualization_cpu = NULL;
 	mutex_unlock(&vendor_module_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
@@ -12528,7 +12528,7 @@ int kvm_arch_enable_virtualization_cpu(void)
 	if (ret)
 		return ret;
 
-	ret = kvm_x86_call(hardware_enable)();
+	ret = kvm_x86_call(enable_virtualization_cpu)();
 	if (ret != 0)
 		return ret;
@@ -12610,7 +12610,7 @@ int kvm_arch_enable_virtualization_cpu(void)
 
 void kvm_arch_disable_virtualization_cpu(void)
 {
-	kvm_x86_call(hardware_disable)();
+	kvm_x86_call(disable_virtualization_cpu)();
 	drop_user_return_notifiers();
 }