Commit 332967a3 authored by Andrew Jones, committed by Paolo Bonzini

x86: kvm: introduce periodic global clock updates

commit 0061d53d introduced a mechanism to execute a global clock
update for a VM. We can apply this periodically in order to propagate
host NTP corrections. Also, if all vcpus of a VM are pinned, then
without an additional trigger no host NTP corrections can propagate to
the guest either, as the current trigger is only vcpu CPU migration.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7e44e449
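
For background: the patch builds a periodic timer out of the kernel's
delayed-work API by having the work function re-queue itself each time it
runs. Below is a minimal, self-contained sketch of that self-rescheduling
pattern as a toy module; the names (periodic_work, PERIOD, periodic_fn) are
hypothetical and only mirror the shape of kvmclock_sync_fn() in the diff
that follows.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#define PERIOD (300 * HZ)	/* hypothetical; the patch also uses 300 s */

static struct delayed_work periodic_work;

/* Runs in process context on the system workqueue, then re-arms itself;
 * the same self-rescheduling shape as kvmclock_sync_fn(). */
static void periodic_fn(struct work_struct *work)
{
	pr_info("periodic tick\n");
	schedule_delayed_work(&periodic_work, PERIOD);
}

static int __init periodic_init(void)
{
	INIT_DELAYED_WORK(&periodic_work, periodic_fn);
	schedule_delayed_work(&periodic_work, PERIOD);
	return 0;
}

static void __exit periodic_exit(void)
{
	/* Cancel and wait for any in-flight run before unload, as
	 * kvm_arch_sync_events() does at VM teardown. */
	cancel_delayed_work_sync(&periodic_work);
}

module_init(periodic_init);
module_exit(periodic_exit);
MODULE_LICENSE("GPL");
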
@@ -599,6 +599,7 @@ struct kvm_arch {
 	u64 master_kernel_ns;
 	cycle_t master_cycle_now;
 	struct delayed_work kvmclock_update_work;
+	struct delayed_work kvmclock_sync_work;

 	struct kvm_xen_hvm_config xen_hvm_config;
...
@@ -1660,6 +1660,20 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 					KVMCLOCK_UPDATE_DELAY);
 }

+#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
+
+static void kvmclock_sync_fn(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
+					   kvmclock_sync_work);
+	struct kvm *kvm = container_of(ka, struct kvm, arch);
+
+	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
+	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+					KVMCLOCK_SYNC_PERIOD);
+}
+
 static bool msr_mtrr_valid(unsigned msr)
 {
 	switch (msr) {
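
The work callback receives only a struct work_struct pointer;
kvmclock_sync_fn() uses to_delayed_work() and container_of() to walk back
out from the embedded member to the enclosing struct kvm. A standalone
sketch of that recovery pattern, with a hypothetical struct outer standing
in for kvm_arch/kvm:

#include <linux/kernel.h>	/* container_of() */
#include <linux/workqueue.h>

struct outer {
	int id;
	struct delayed_work work;	/* embedded, like kvmclock_sync_work */
};

static void outer_fn(struct work_struct *w)
{
	/* work_struct -> delayed_work -> enclosing struct outer,
	 * mirroring the dwork/ka/kvm chain above. */
	struct delayed_work *dw = to_delayed_work(w);
	struct outer *o = container_of(dw, struct outer, work);

	pr_info("outer id=%d\n", o->id);
}
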
@@ -6736,6 +6750,7 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	int r;
 	struct msr_data msr;
+	struct kvm *kvm = vcpu->kvm;

 	r = vcpu_load(vcpu);
 	if (r)
@@ -6746,6 +6761,9 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	kvm_write_tsc(vcpu, &msr);
 	vcpu_put(vcpu);

+	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+					KVMCLOCK_SYNC_PERIOD);
+
 	return r;
 }
@@ -7039,6 +7057,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	pvclock_update_vm_gtod_copy(kvm);

 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
+	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

 	return 0;
 }
@@ -7077,6 +7096,7 @@ static void kvm_free_vcpus(struct kvm *kvm)

 void kvm_arch_sync_events(struct kvm *kvm)
 {
+	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
 	kvm_free_all_assigned_devices(kvm);
 	kvm_free_pit(kvm);
...