Commit 1ab9287a authored by Ilias Stamatis, committed by Paolo Bonzini

KVM: X86: Add vendor callbacks for writing the TSC multiplier

Currently vmx_vcpu_load_vmcs() writes the TSC_MULTIPLIER field of the
VMCS every time the VMCS is loaded. Instead of doing this, set this
field from common code on initialization and whenever the scaling ratio
changes.

Additionally, remove vmx->current_tsc_ratio. This field is redundant, as
vcpu->arch.tsc_scaling_ratio already tracks the current TSC scaling
ratio. vmx->current_tsc_ratio was only used to avoid unnecessary VMCS
writes, and it is no longer needed once that code is removed from the
VMCS load path.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
Message-Id: <20210607105438.16541-1-ilstam@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent edcfe540
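Before the diff, a minimal self-contained C sketch (a userspace mock, not kernel code) of the flow this patch establishes: common code caches the L1 and effective scaling ratios and pushes the effective value to hardware through a single per-vendor write_tsc_multiplier callback. The struct layouts, the mock_vmx_* name and the main() driver below are illustrative stand-ins rather than real KVM types; only the callback shape and the call order mirror the patch.

/*
 * Userspace mock of the new flow (illustrative only, not kernel code):
 * common code owns the effective TSC scaling ratio and invokes one
 * vendor callback whenever that ratio changes.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu {
	uint64_t l1_tsc_scaling_ratio;	/* ratio requested for L1 */
	uint64_t tsc_scaling_ratio;	/* ratio currently in effect */
};

struct x86_ops {
	/* VMX writes the TSC_MULTIPLIER VMCS field; SVM writes MSR_AMD64_TSC_RATIO */
	void (*write_tsc_multiplier)(struct vcpu *vcpu, uint64_t multiplier);
};

static void mock_vmx_write_tsc_multiplier(struct vcpu *vcpu, uint64_t multiplier)
{
	(void)vcpu;
	printf("vmcs_write64(TSC_MULTIPLIER, %#llx)\n",
	       (unsigned long long)multiplier);
}

static struct x86_ops ops = {
	.write_tsc_multiplier = mock_vmx_write_tsc_multiplier,
};

/* Mirrors the shape of kvm_vcpu_write_tsc_multiplier(): cache ratios, then write hardware. */
static void vcpu_write_tsc_multiplier(struct vcpu *vcpu, uint64_t l1_multiplier)
{
	vcpu->l1_tsc_scaling_ratio = l1_multiplier;
	/* the real code merges in the L2 multiplier while in guest mode */
	vcpu->tsc_scaling_ratio = l1_multiplier;
	ops.write_tsc_multiplier(vcpu, vcpu->tsc_scaling_ratio);
}

int main(void)
{
	struct vcpu v = { 0 };
	/* 1ULL << 48 is the 1:1 ratio in VMX's 48-fractional-bit format */
	vcpu_write_tsc_multiplier(&v, 1ULL << 48);
	return 0;
}

With this shape, vmx_vcpu_load_vmcs() no longer has to touch TSC_MULTIPLIER at all, and no per-VMX cache of the last-written value is needed, because the callback fires only when the scaling ratio actually changes.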
@@ -90,6 +90,7 @@ KVM_X86_OP_NULL(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
 KVM_X86_OP(get_l2_tsc_multiplier)
 KVM_X86_OP(write_tsc_offset)
+KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
@@ -1314,6 +1314,7 @@ struct kvm_x86_ops {
 	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
 	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
 
 	/*
 	 * Retrieve somewhat arbitrary exit information. Intended to be used
@@ -1101,6 +1101,11 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+}
+
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
 					      struct vcpu_svm *svm)
@@ -4526,6 +4531,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
 	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
 	.write_tsc_offset = svm_write_tsc_offset,
+	.write_tsc_multiplier = svm_write_tsc_multiplier,
 
 	.load_mmu_pgd = svm_load_mmu_pgd,
@@ -2533,9 +2533,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	}
 
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
-
 	if (kvm_has_tsc_control)
-		decache_tsc_multiplier(vmx);
+		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
 
 	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
@@ -4501,12 +4500,12 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+	if (kvm_has_tsc_control)
+		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
+
 	if (vmx->nested.l1_tpr_threshold != -1)
 		vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
 
-	if (kvm_has_tsc_control)
-		decache_tsc_multiplier(vmx);
-
 	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
 		vmx->nested.change_vmcs01_virtual_apic_mode = false;
 		vmx_set_virtual_apic_mode(vcpu);
@@ -1390,11 +1390,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 		vmx->loaded_vmcs->cpu = cpu;
 	}
 
-	/* Setup TSC multiplier */
-	if (kvm_has_tsc_control &&
-	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
-		decache_tsc_multiplier(vmx);
 }
 
 /*
@@ -1813,6 +1808,11 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	vmcs_write64(TSC_OFFSET, offset);
 }
 
+static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+	vmcs_write64(TSC_MULTIPLIER, multiplier);
+}
+
 /*
  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
@@ -7707,6 +7707,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
 	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
 	.write_tsc_offset = vmx_write_tsc_offset,
+	.write_tsc_multiplier = vmx_write_tsc_multiplier,
 
 	.load_mmu_pgd = vmx_load_mmu_pgd,
@@ -322,8 +322,6 @@ struct vcpu_vmx {
 	/* apic deadline value in host tsc */
 	u64 hv_deadline_tsc;
 
-	u64 current_tsc_ratio;
-
 	unsigned long host_debugctlmsr;
 
 	/*
@@ -532,12 +530,6 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 			      GFP_KERNEL_ACCOUNT);
 }
 
-static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
-{
-	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
-	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-}
-
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
 	return vmx->secondary_exec_control &
@@ -2179,14 +2179,15 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 	return v;
 }
 
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
+
 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
 	u64 ratio;
 
 	/* Guest TSC same frequency as host TSC? */
 	if (!scale) {
-		vcpu->arch.l1_tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
-		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+		kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
 		return 0;
 	}
@@ -2212,7 +2213,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 		return -1;
 	}
 
-	vcpu->arch.l1_tsc_scaling_ratio = vcpu->arch.tsc_scaling_ratio = ratio;
+	kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
 	return 0;
 }
@@ -2224,8 +2225,7 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
 	/* tsc_khz can be zero if TSC calibration fails */
 	if (user_tsc_khz == 0) {
 		/* set tsc_scaling_ratio to a safe value */
-		vcpu->arch.l1_tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
-		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+		kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
 		return -1;
 	}
@@ -2383,6 +2383,23 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 	static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
 }
 
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
+{
+	vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
+
+	/* Userspace is changing the multiplier while L2 is active */
+	if (is_guest_mode(vcpu))
+		vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+			l1_multiplier,
+			static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+	else
+		vcpu->arch.tsc_scaling_ratio = l1_multiplier;
+
+	if (kvm_has_tsc_control)
+		static_call(kvm_x86_write_tsc_multiplier)(
+			vcpu, vcpu->arch.tsc_scaling_ratio);
+}
+
 static inline bool kvm_check_tsc_unstable(void)
 {
 #ifdef CONFIG_X86_64
@@ -10364,8 +10381,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	else
 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
 
-	kvm_set_tsc_khz(vcpu, max_tsc_khz);
-
 	r = kvm_mmu_create(vcpu);
 	if (r < 0)
 		return r;
@@ -10433,6 +10448,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 	kvm_vcpu_mtrr_init(vcpu);
 	vcpu_load(vcpu);
+	kvm_set_tsc_khz(vcpu, max_tsc_khz);
 	kvm_vcpu_reset(vcpu, false);
 	kvm_init_mmu(vcpu, false);
 	vcpu_put(vcpu);