Commit 99e3e30a authored by Zachary Amsden, committed by Avi Kivity

KVM: x86: Move TSC offset writes to common code

Also, take a spinlock to ensure that the reading of the TSC and the
storing of the offset are never preempted.  While the lock is overkill
now, it is useful later in this patch series.
Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent f4e1b3c8
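
Background for the diff below: on both VMX and SVM the hardware applies
guest_tsc = host_tsc + tsc_offset, so giving the guest the value `data`
means storing tsc_offset = data - host_tsc.  If the writer is preempted
between reading the host TSC and the offset taking effect, the guest
clock is shifted by the cycles lost in between.  The following
standalone userspace sketch (illustration only, not kernel code;
assumes an x86 host with a constant-rate TSC and a compiler providing
<x86intrin.h>) makes that skew visible, with usleep() standing in for
the preemption the patch's spinlock rules out:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <x86intrin.h>

    int main(void)
    {
            uint64_t data = 0;                  /* guest TSC value we intend */
            uint64_t offset = data - __rdtsc(); /* tsc_offset = data - host_tsc */

            usleep(10 * 1000);                  /* "preemption" between read and use */

            /* If the offset only takes effect now, the guest does not start
             * at 'data' but at data + (cycles lost in between): */
            uint64_t skew = __rdtsc() + offset - data;
            printf("guest TSC skew: %llu cycles\n", (unsigned long long)skew);
            return 0;
    }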
@@ -395,6 +395,7 @@ struct kvm_arch {
 	unsigned long irq_sources_bitmap;
 	s64 kvmclock_offset;
+	spinlock_t tsc_write_lock;
 	struct kvm_xen_hvm_config xen_hvm_config;

@@ -521,6 +522,8 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
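
The `write_tsc_offset` member added above follows the usual kvm_x86_ops
pattern: common x86 code calls through a function-pointer table filled
in by whichever vendor module (VMX or SVM) is loaded, so the new
kvm_write_tsc() below never needs to name a vendor.  A minimal
userspace sketch of that dispatch mechanism, using illustrative names
rather than the kernel's types:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for kvm_x86_ops: one hook per vendor-specific operation. */
    struct x86_ops {
            void (*write_tsc_offset)(uint64_t offset);
    };

    static void vmx_like_write_tsc_offset(uint64_t offset)
    {
            printf("VMX-style backend: TSC_OFFSET <- %llu\n",
                   (unsigned long long)offset);
    }

    static const struct x86_ops vmx_like_ops = {
            .write_tsc_offset = vmx_like_write_tsc_offset,
    };

    /* Chosen once at module load; common code stays vendor-agnostic. */
    static const struct x86_ops *ops = &vmx_like_ops;

    int main(void)
    {
            ops->write_tsc_offset(42);
            return 0;
    }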
@@ -915,7 +915,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
 	init_vmcb(svm);
-	svm_write_tsc_offset(&svm->vcpu, 0-native_read_tsc());
+	kvm_write_tsc(&svm->vcpu, 0);

 	err = fx_init(&svm->vcpu);
 	if (err)

@@ -2581,7 +2581,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	switch (ecx) {
 	case MSR_IA32_TSC:
-		svm_write_tsc_offset(vcpu, data - native_read_tsc());
+		kvm_write_tsc(vcpu, data);
 		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;

@@ -3551,6 +3551,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_supported_cpuid = svm_set_supported_cpuid,
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
+
+	.write_tsc_offset = svm_write_tsc_offset,
 };

 static int __init svm_init(void)
@@ -1146,10 +1146,9 @@ static u64 guest_read_tsc(void)
 }

 /*
- * writes 'guest_tsc' into guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
+ * writes 'offset' into guest's timestamp counter offset register
 */
-static void vmx_write_tsc_offset(u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vmcs_write64(TSC_OFFSET, offset);
 }

@@ -1224,7 +1223,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
-	u64 host_tsc;
 	int ret = 0;

 	switch (msr_index) {

@@ -1254,8 +1252,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		rdtscll(host_tsc);
-		vmx_write_tsc_offset(data - host_tsc);
+		kvm_write_tsc(vcpu, data);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {

@@ -2653,7 +2650,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);

-	vmx_write_tsc_offset(0-native_read_tsc());
+	kvm_write_tsc(&vmx->vcpu, 0);

 	return 0;
 }

@@ -4348,6 +4345,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_supported_cpuid = vmx_set_supported_cpuid,
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+	.write_tsc_offset = vmx_write_tsc_offset,
 };

 static int __init vmx_init(void)
@@ -895,6 +895,22 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 offset;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+	offset = data - native_read_tsc();
+	kvm_x86_ops->write_tsc_offset(vcpu, offset);
+	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+
+	/* Reset of TSC must disable overshoot protection below */
+	vcpu->arch.hv_clock.tsc_timestamp = 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_tsc);
+
 static void kvm_write_guest_time(struct kvm_vcpu *v)
 {
 	struct timespec ts;

@@ -5495,6 +5511,8 @@ struct kvm *kvm_arch_create_vm(void)
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

+	spin_lock_init(&kvm->arch.tsc_write_lock);
+
 	return kvm;
 }
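
Note how kvm_write_tsc() uses the irqsave lock variant: the TSC read
and the handoff to the vendor hook form one critical section that
neither another writer nor a local interrupt can split, which is what
the commit message's "never preempted" refers to.  A userspace analogue
of that pairing (a pthread mutex cannot model disabled interrupts, so
this only shows the writer-vs-writer half; build with cc -pthread on an
x86 host):

    #include <pthread.h>
    #include <stdint.h>
    #include <x86intrin.h>

    static pthread_mutex_t tsc_write_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t tsc_offset;         /* stand-in for the VMCS/VMCB field */

    /* The host-TSC read and the offset publication happen under one
     * lock, so no concurrent writer can interleave between them. */
    static void write_tsc(uint64_t data)
    {
            pthread_mutex_lock(&tsc_write_lock);
            tsc_offset = data - __rdtsc();
            pthread_mutex_unlock(&tsc_write_lock);
    }

    int main(void)
    {
            write_tsc(0);               /* guest starts its TSC at zero */
            return 0;
    }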
@@ -68,4 +68,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);

 #endif