Commit 307a94c7 authored by Ilias Stamatis, committed by Paolo Bonzini

KVM: X86: Add functions for retrieving L2 TSC fields from common code

In order to implement as much of the nested TSC scaling logic as
possible in common code, we need these vendor callbacks for retrieving
the TSC offset and the TSC multiplier that L1 has set for L2.
Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210526184418.28881-7-ilstam@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
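For context, these callbacks feed the common-code helpers introduced earlier in this series (see the parent commit), which fold L1's and L2's values into the offset and multiplier actually programmed into hardware. Multipliers are fixed-point numbers carrying kvm_tsc_scaling_ratio_frac_bits fractional bits, so the combination looks roughly like the sketch below; it is illustrative only and not part of this patch:

u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{
	u64 nested_offset;

	/* L2's TSC = (L1's TSC * l2_multiplier >> frac_bits) + l2_offset */
	if (l2_multiplier == kvm_default_tsc_scaling_ratio)
		nested_offset = l1_offset;
	else
		nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
						kvm_tsc_scaling_ratio_frac_bits);

	nested_offset += l2_offset;
	return nested_offset;
}

u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{
	/* Multiply the two fixed-point ratios, shifting the product back down. */
	if (l2_multiplier != kvm_default_tsc_scaling_ratio)
		return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
				       kvm_tsc_scaling_ratio_frac_bits);

	return l1_multiplier;
}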
parent 3c0f9936
arch/x86/include/asm/kvm-x86-ops.h
@@ -87,6 +87,8 @@ KVM_X86_OP(set_identity_map_addr)
 KVM_X86_OP(get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP_NULL(has_wbinvd_exit)
+KVM_X86_OP(get_l2_tsc_offset)
+KVM_X86_OP(get_l2_tsc_multiplier)
 KVM_X86_OP(write_l1_tsc_offset)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
arch/x86/include/asm/kvm_host.h
@@ -1311,6 +1311,8 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
 
+	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
+	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
 	/* Returns actual tsc_offset set in active VMCS */
 	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
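Declaring the hooks in kvm-x86-ops.h above also generates matching static calls, so common code can reach the vendor implementations without an indirect branch. A hypothetical call site would look roughly like this (illustrative only):

	u64 l2_offset = static_call(kvm_x86_get_l2_tsc_offset)(vcpu);
	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);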
arch/x86/kvm/svm/svm.c
@@ -1080,6 +1080,18 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
 	seg->base = 0;
 }
 
+static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm->nested.ctl.tsc_offset;
+}
+
+static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	return kvm_default_tsc_scaling_ratio;
+}
+
 static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4524,6 +4536,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
+	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
+	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
 	.write_l1_tsc_offset = svm_write_l1_tsc_offset,
 	.load_mmu_pgd = svm_load_mmu_pgd,
arch/x86/kvm/vmx/vmx.c
@@ -1787,6 +1787,27 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 	vmx->guest_uret_msrs_loaded = false;
 }
 
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
+		return vmcs12->tsc_offset;
+
+	return 0;
+}
+
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
+	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+		return vmcs12->tsc_multiplier;
+
+	return kvm_default_tsc_scaling_ratio;
+}
+
 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -7700,6 +7721,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
 	.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
 	.load_mmu_pgd = vmx_load_mmu_pgd,
arch/x86/kvm/vmx/vmx.h
@@ -404,6 +404,9 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+
 static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
 					     int type, bool value)
 {
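A note on semantics: the multiplier is a fixed-point ratio with kvm_tsc_scaling_ratio_frac_bits fractional bits, and kvm_default_tsc_scaling_ratio encodes a ratio of 1.0 (1ULL << kvm_tsc_scaling_ratio_frac_bits). That is why svm_get_l2_tsc_multiplier() and the fallback path of vmx_get_l2_tsc_multiplier() return it when no scaling is in effect for L2. Applying such a ratio mirrors KVM's __scale_tsc(); a minimal sketch, with the function name made up for illustration:

static u64 scale_tsc_example(u64 tsc, u64 ratio)
{
	/* A ratio equal to the default encodes 1.0, i.e. no scaling. */
	if (ratio == kvm_default_tsc_scaling_ratio)
		return tsc;

	/* 128-bit multiply, then drop the fractional bits. */
	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}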