Commit 62711e5a authored by Jinrong Liang, committed by Paolo Bonzini

KVM: x86: Remove unused "vcpu" of kvm_scale_tsc()

The "struct kvm_vcpu *vcpu" parameter of kvm_scale_tsc() is not used,
so remove it. No functional change intended.
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
Message-Id: <20220125095909.38122-18-cloudliang@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7127fd36
@@ -1878,7 +1878,7 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
 	return kvm_find_user_return_msr(msr) >= 0;
 }

-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
+u64 kvm_scale_tsc(u64 tsc, u64 ratio);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
@@ -2399,7 +2399,7 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc)
 	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
 }

-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
+u64 kvm_scale_tsc(u64 tsc, u64 ratio)
 {
 	u64 _tsc = tsc;
@@ -2414,7 +2414,7 @@ static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
 	u64 tsc;

-	tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
+	tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);

 	return target_tsc - tsc;
 }
@@ -2422,7 +2422,7 @@ static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
 	return vcpu->arch.l1_tsc_offset +
-		kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
+		kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
@@ -2625,7 +2625,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
 {
 	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
 		WARN_ON(adjustment < 0);
-	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
+	adjustment = kvm_scale_tsc((u64) adjustment,
 				   vcpu->arch.l1_tsc_scaling_ratio);
 	adjust_tsc_offset_guest(vcpu, adjustment);
 }
@@ -3045,7 +3045,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	/* With all the info we got, fill in the values */

 	if (kvm_has_tsc_control)
-		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
+		tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
 					    v->arch.l1_tsc_scaling_ratio);

 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
@@ -3857,7 +3857,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			ratio = vcpu->arch.tsc_scaling_ratio;
 		}

-		msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
+		msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
 		break;
 	}
 	case MSR_MTRRcap:
@@ -5132,7 +5132,7 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
 			   kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
 			   kvm->arch.last_tsc_offset == offset);

-		tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
+		tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
 		ns = get_kvmclock_base_ns();

 		__kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment