Commit f1e2b260 authored by Marcelo Tosatti's avatar Marcelo Tosatti Committed by Avi Kivity

KVM: Allow adjust_tsc_offset to be in host or guest cycles

Redefine the API to take a parameter indicating whether an
adjustment is in host or guest cycles.
Signed-off-by: Zachary Amsden <zamsden@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 6f526ec5
...@@ -646,7 +646,7 @@ struct kvm_x86_ops { ...@@ -646,7 +646,7 @@ struct kvm_x86_ops {
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
int (*get_lpage_level)(void); int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void); bool (*rdtscp_supported)(void);
void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment); void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
...@@ -676,6 +676,17 @@ struct kvm_arch_async_pf { ...@@ -676,6 +676,17 @@ struct kvm_arch_async_pf {
extern struct kvm_x86_ops *kvm_x86_ops; extern struct kvm_x86_ops *kvm_x86_ops;
/*
 * Adjust the vcpu's TSC offset by @adjustment, where @adjustment is
 * expressed in guest cycles (no host-to-guest scaling is applied by
 * the vendor callback).
 */
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	const bool host_cycles = false;

	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, host_cycles);
}
/*
 * Adjust the vcpu's TSC offset by @adjustment, where @adjustment is
 * expressed in host cycles; the vendor callback may scale it to guest
 * cycles before applying it.
 */
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	const bool host_cycles = true;

	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, host_cycles);
}
int kvm_mmu_module_init(void); int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void); void kvm_mmu_module_exit(void);
......
...@@ -1016,10 +1016,14 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) ...@@ -1016,10 +1016,14 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
mark_dirty(svm->vmcb, VMCB_INTERCEPTS); mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
} }
static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
WARN_ON(adjustment < 0);
if (host)
adjustment = svm_scale_tsc(vcpu, adjustment);
svm->vmcb->control.tsc_offset += adjustment; svm->vmcb->control.tsc_offset += adjustment;
if (is_guest_mode(vcpu)) if (is_guest_mode(vcpu))
svm->nested.hsave->control.tsc_offset += adjustment; svm->nested.hsave->control.tsc_offset += adjustment;
......
...@@ -1856,7 +1856,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) ...@@ -1856,7 +1856,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
} }
} }
static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{ {
u64 offset = vmcs_read64(TSC_OFFSET); u64 offset = vmcs_read64(TSC_OFFSET);
vmcs_write64(TSC_OFFSET, offset + adjustment); vmcs_write64(TSC_OFFSET, offset + adjustment);
......
...@@ -1116,7 +1116,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) ...@@ -1116,7 +1116,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
if (vcpu->tsc_catchup) { if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns); u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) { if (tsc > tsc_timestamp) {
kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp); adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc; tsc_timestamp = tsc;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment