Commit 9b399dfd authored by Ilias Stamatis, committed by Paolo Bonzini

KVM: X86: Rename kvm_compute_tsc_offset() to kvm_compute_l1_tsc_offset()

All existing callers pass L1 TSC values to kvm_compute_tsc_offset(). Let's
document this by renaming it to kvm_compute_l1_tsc_offset().
Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210526184418.28881-4-ilstam@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 805d705f
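
For context before reading the hunks: the first hunk truncates the helper's
body after "u64 tsc;". The sketch below shows what the renamed function
computes, reconstructed from the call sites in this patch; the
kvm_scale_tsc(vcpu, rdtsc()) body is an assumption consistent with the hunk,
not quoted from this diff.

static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	/*
	 * Assumed body: read the host TSC and scale it by the L1
	 * scaling ratio, so "tsc" is the host counter expressed in
	 * L1 guest units.
	 */
	tsc = kvm_scale_tsc(vcpu, rdtsc());

	/*
	 * The offset that makes the L1 guest read target_tsc, given
	 * that guest_tsc = scale(host_tsc) + offset.
	 */
	return target_tsc - tsc;
}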
@@ -2319,7 +2319,7 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
 
-static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
 	u64 tsc;
 
@@ -2363,7 +2363,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	bool synchronizing = false;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = kvm_compute_tsc_offset(vcpu, data);
+	offset = kvm_compute_l1_tsc_offset(vcpu, data);
 	ns = get_kvmclock_base_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -2402,7 +2402,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 		} else {
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			data += delta;
-			offset = kvm_compute_tsc_offset(vcpu, data);
+			offset = kvm_compute_l1_tsc_offset(vcpu, data);
 		}
 		matched = true;
 		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
@@ -3252,7 +3252,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (msr_info->host_initiated) {
 			kvm_synchronize_tsc(vcpu, data);
 		} else {
-			u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+			u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
 			adjust_tsc_offset_guest(vcpu, adj);
 			vcpu->arch.ia32_tsc_adjust_msr += adj;
 		}
@@ -4140,7 +4140,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 
 		if (kvm_check_tsc_unstable()) {
-			u64 offset = kvm_compute_tsc_offset(vcpu,
+			u64 offset = kvm_compute_l1_tsc_offset(vcpu,
 						vcpu->arch.last_guest_tsc);
 			kvm_vcpu_write_tsc_offset(vcpu, offset);
 			vcpu->arch.tsc_catchup = 1;
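
To see why the values passed in are L1 TSC values, take the guest-initiated
IA32_TSC write path in the kvm_set_msr_common() hunk above: a guest write of
"data" adjusts the current L1 offset by the difference between the offset
that would realize "data" and the offset currently in use. The following is
a small self-contained illustration of that arithmetic; it is hypothetical
standalone code whose names only mirror the diff, not kernel code.

#include <stdint.h>

/* Same math as the renamed helper: offset = target - scaled host TSC. */
static uint64_t compute_l1_tsc_offset(uint64_t scaled_host_tsc,
				      uint64_t target_tsc)
{
	return target_tsc - scaled_host_tsc;
}

int main(void)
{
	uint64_t scaled_host_tsc = 1000000;	/* host TSC in L1 guest units */
	uint64_t cur_offset = 500;		/* stands in for vcpu->arch.l1_tsc_offset */
	uint64_t data = 2000000;		/* value the guest writes to IA32_TSC */

	/* Mirrors: adj = kvm_compute_l1_tsc_offset(vcpu, data) - l1_tsc_offset */
	uint64_t adj = compute_l1_tsc_offset(scaled_host_tsc, data) - cur_offset;

	/* Applying adj makes the guest-visible TSC equal to data. */
	uint64_t new_offset = cur_offset + adj;
	return (scaled_host_tsc + new_offset == data) ? 0 : 1;
}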