Commit fe3eb504 authored by Ilias Stamatis, committed by Paolo Bonzini

KVM: X86: Add a ratio parameter to kvm_scale_tsc()

Sometimes kvm_scale_tsc() needs to use the current scaling ratio and
other times (like when reading the TSC from user space) it needs to use
L1's scaling ratio. Have the caller specify this by passing the ratio as
a parameter.
Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210526184418.28881-5-ilstam@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9b399dfd
...@@ -1795,7 +1795,7 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr) ...@@ -1795,7 +1795,7 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
return kvm_find_user_return_msr(msr) >= 0; return kvm_find_user_return_msr(msr) >= 0;
} }
u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc); u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc); u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu); unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
......
...@@ -2307,10 +2307,9 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc) ...@@ -2307,10 +2307,9 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc)
return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits); return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
} }
u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
{ {
u64 _tsc = tsc; u64 _tsc = tsc;
u64 ratio = vcpu->arch.tsc_scaling_ratio;
if (ratio != kvm_default_tsc_scaling_ratio) if (ratio != kvm_default_tsc_scaling_ratio)
_tsc = __scale_tsc(ratio, tsc); _tsc = __scale_tsc(ratio, tsc);
...@@ -2323,14 +2322,15 @@ static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) ...@@ -2323,14 +2322,15 @@ static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{ {
u64 tsc; u64 tsc;
tsc = kvm_scale_tsc(vcpu, rdtsc()); tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
return target_tsc - tsc; return target_tsc - tsc;
} }
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{ {
return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc); return vcpu->arch.l1_tsc_offset +
kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
} }
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
...@@ -2463,7 +2463,8 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) ...@@ -2463,7 +2463,8 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{ {
if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
WARN_ON(adjustment < 0); WARN_ON(adjustment < 0);
adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
vcpu->arch.l1_tsc_scaling_ratio);
adjust_tsc_offset_guest(vcpu, adjustment); adjust_tsc_offset_guest(vcpu, adjustment);
} }
...@@ -2846,7 +2847,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) ...@@ -2846,7 +2847,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */ /* With all the info we got, fill in the values */
if (kvm_has_tsc_control) if (kvm_has_tsc_control)
tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz); tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
v->arch.l1_tsc_scaling_ratio);
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
...@@ -3554,10 +3556,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -3554,10 +3556,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* return L1's TSC value to ensure backwards-compatible * return L1's TSC value to ensure backwards-compatible
* behavior for migration. * behavior for migration.
*/ */
u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : u64 offset, ratio;
vcpu->arch.tsc_offset;
msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; if (msr_info->host_initiated) {
offset = vcpu->arch.l1_tsc_offset;
ratio = vcpu->arch.l1_tsc_scaling_ratio;
} else {
offset = vcpu->arch.tsc_offset;
ratio = vcpu->arch.tsc_scaling_ratio;
}
msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
break; break;
} }
case MSR_MTRRcap: case MSR_MTRRcap:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment