Commit bd3d394e authored by Paolo Bonzini

x86, KVM: remove unnecessary argument to x86_virt_spec_ctrl and callers

x86_virt_spec_ctrl only deals with the paravirtualized
MSR_IA32_VIRT_SPEC_CTRL now and does not handle MSR_IA32_SPEC_CTRL
anymore; remove the corresponding, unused argument.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9f2febf3
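
In short, the prototype change (condensed directly from the hunks below):

/* Before: the guest's IA32_SPEC_CTRL value was passed in as well. */
extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);

/* After: only the paravirtualized VIRT_SPEC_CTRL value remains; the real
 * MSR_IA32_SPEC_CTRL is saved/restored elsewhere as of the parent commit. */
extern void x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool guest);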
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -13,7 +13,7 @@
  * Takes the guest view of SPEC_CTRL MSR as a parameter and also
  * the guest's version of VIRT_SPEC_CTRL, if emulated.
  */
-extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+extern void x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool guest);

 /**
  * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
@@ -24,9 +24,9 @@ extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
  * Avoids writing to the MSR if the content/bits are the same
  */
 static inline
-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+void x86_spec_ctrl_set_guest(u64 guest_virt_spec_ctrl)
 {
-	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+	x86_virt_spec_ctrl(guest_virt_spec_ctrl, true);
 }

 /**
@@ -38,9 +38,9 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
  * Avoids writing to the MSR if the content/bits are the same
  */
 static inline
-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+void x86_spec_ctrl_restore_host(u64 guest_virt_spec_ctrl)
 {
-	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+	x86_virt_spec_ctrl(guest_virt_spec_ctrl, false);
 }

 /* AMD specific Speculative Store Bypass MSR data */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -200,7 +200,7 @@ void __init check_bugs(void)
  * MSR_IA32_SPEC_CTRL for SSBD.
  */
 void
-x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
 {
 	u64 guestval, hostval;
 	struct thread_info *ti = current_thread_info();
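The body of x86_virt_spec_ctrl() is collapsed in the hunk above. For orientation, here is a paraphrased sketch of the SSBD-only logic that remains, based on the upstream bugs.c of this period; the helpers ssbd_tif_to_spec_ctrl(), ssbd_spec_ctrl_to_tif() and speculation_ctrl_update() come from that era's kernel and the exact details should be treated as an approximation, not as part of this commit:

void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/* Nothing to do unless SSBD is controlled via LS_CFG or VIRT_SPEC_CTRL. */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/* Host view: SSBD forced on, or taken from the current task's TIF_SSBD. */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Guest view: only the SSBD bit of the paravirtual MSR is honored. */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	/* Switch the effective SSBD state only when guest and host differ. */
	if (hostval != guestval)
		speculation_ctrl_update(setguest ? ssbd_spec_ctrl_to_tif(guestval)
						 : ssbd_spec_ctrl_to_tif(hostval));
}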
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3984,7 +3984,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
-		x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+		x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);

 	svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);

@@ -3992,7 +3992,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	reload_tss(vcpu);

 	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
-		x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+		x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);

 	if (!sev_es_guest(vcpu->kvm)) {
 		vcpu->arch.cr2 = svm->vmcb->save.cr2;
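Net effect at the call sites: when the CPU lacks hardware-virtualized SPEC_CTRL (X86_FEATURE_V_SPEC_CTRL), svm_vcpu_run() now brackets the guest run with one-argument calls. Condensed from the svm.c hunks above, with unrelated code omitted:

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);	/* adopt guest SSBD view */

	svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);	/* run the guest */

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);	/* back to host view */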