Commit 53f98558 authored by Andrew Jones, committed by Marc Zyngier

KVM: arm64: pvtime: Fix stolen time accounting across migration

When updating the stolen time we should always read the current
stolen time from the user-provided memory, not from a kernel
cache. If we use a cache, then we'll end up resetting the stolen
time to zero on the first update after migration.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200804170604.42662-5-drjones@redhat.com
parent 4d2d4ce0
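
To see why caching breaks, consider the sequence: the guest enables stolen time, accumulates some total in the shared page, and is then migrated. The destination kernel starts with a zeroed cache, so the first update overwrites the guest-visible total with only the post-migration delta. A minimal sketch of the two strategies (names are illustrative, not the kernel code; u64 stands in for the kernel type):

    typedef unsigned long long u64;

    /* Stands in for the stolen-time field in the guest's shared page;
     * the VMM migrates this along with the rest of guest memory. */
    static u64 guest_mem_stolen;

    /* Before the fix: a kernel-side cache is authoritative. After
     * migration the cache restarts at zero, so this write clobbers
     * whatever total the guest had already accumulated. */
    static u64 cached_steal;
    static void update_stolen_buggy(u64 delta)
    {
            cached_steal += delta;
            guest_mem_stolen = cached_steal;
    }

    /* After the fix: read-modify-write the guest-provided memory, so
     * the pre-migration total is always the starting point. */
    static void update_stolen_fixed(u64 delta)
    {
            guest_mem_stolen += delta;
    }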
arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {

 	/* Guest PV state */
 	struct {
-		u64 steal;
 		u64 last_steal;
 		gpa_t base;
 	} steal;
arch/arm64/kvm/pvtime.c
@@ -13,26 +13,22 @@
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
+	u64 base = vcpu->arch.steal.base;
 	u64 last_steal = vcpu->arch.steal.last_steal;
-	u64 steal;
-	__le64 steal_le;
-	u64 offset;
+	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+	u64 steal = 0;
 	int idx;
-	u64 base = vcpu->arch.steal.base;

 	if (base == GPA_INVALID)
 		return;

-	/* Let's do the local bookkeeping */
-	steal = vcpu->arch.steal.steal;
-	steal = le64_to_cpu(steal);
-	vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
-	steal += vcpu->arch.steal.last_steal - last_steal;
-	vcpu->arch.steal.steal = steal;
-
-	steal_le = cpu_to_le64(steal);
 	idx = srcu_read_lock(&kvm->srcu);
-	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
-	kvm_put_guest(kvm, base + offset, steal_le);
+	if (!kvm_get_guest(kvm, base + offset, steal)) {
+		steal = le64_to_cpu(steal);
+		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+		steal += vcpu->arch.steal.last_steal - last_steal;
+		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+	}
 	srcu_read_unlock(&kvm->srcu, idx);
 }
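
For reference, the offsetof() above indexes into the shared record defined by ARM DEN0057A (arch/arm64/include/asm/pvclock-abi.h in the kernel tree); the hypervisor read-modify-writes the little-endian stolen_time field in place, so a guest-side reader only ever loads it. A sketch of such a read, with an illustrative helper name:

    #include <linux/compiler.h>	/* READ_ONCE */
    #include <linux/types.h>	/* __le32/__le64 */
    #include <asm/byteorder.h>	/* le64_to_cpu */

    /* Layout per ARM DEN0057A, as in asm/pvclock-abi.h. */
    struct pvclock_vcpu_stolen_time {
            __le32 revision;
            __le32 attributes;
            __le64 stolen_time;	/* nanoseconds, little-endian */
            u8 padding[48];		/* pad the record to 64 bytes */
    } __packed;

    /* Illustrative guest-side read; the kernel's real consumer is the
     * arm64 paravirt steal-time code behind paravirt_steal_clock(). */
    static u64 read_stolen_ns(struct pvclock_vcpu_stolen_time *st)
    {
            return le64_to_cpu(READ_ONCE(st->stolen_time));
    }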
@@ -66,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	 * Start counting stolen time from the time the guest requests
 	 * the feature enabled.
 	 */
-	vcpu->arch.steal.steal = 0;
 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;

 	idx = srcu_read_lock(&kvm->srcu);
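
Dropping vcpu->arch.steal.steal = 0 here is safe because the guest-visible record is already zeroed when the feature is enabled: kvm_init_stolen_time() writes a zero-initialized structure to the registered address, making guest memory the single source of truth. A simplified excerpt, per the surrounding kernel source:

    struct pvclock_vcpu_stolen_time init_values = {};

    /* Publish a zeroed record, stolen_time included, at the base
     * address the vcpu registered. On migration the VMM carries this
     * page along with guest RAM, so the running total survives. */
    kvm_write_guest(kvm, base, &init_values, sizeof(init_values));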
include/linux/kvm_host.h
@@ -749,6 +749,26 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);

+#define __kvm_get_guest(kvm, gfn, offset, v)				\
+({									\
+	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
+	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
+	int __ret = -EFAULT;						\
+									\
+	if (!kvm_is_error_hva(__addr))					\
+		__ret = get_user(v, __uaddr);				\
+	__ret;								\
+})
+
+#define kvm_get_guest(kvm, gpa, v)					\
+({									\
+	gpa_t __gpa = gpa;						\
+	struct kvm *__kvm = kvm;					\
+									\
+	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
+			offset_in_page(__gpa), v);			\
+})
+
 #define __kvm_put_guest(kvm, gfn, offset, v)				\
 ({									\
 	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
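
The new kvm_get_guest() deliberately mirrors kvm_put_guest(): typeof(v) sizes the access to the variable, get_user() performs the load through the host-virtual alias of the gfn, and -EFAULT comes back when the address has no valid mapping. A usage sketch (the function and counter below are invented for illustration; as in the pvtime code, the caller holds the kvm->srcu read lock across the access):

    /* Hypothetical read-modify-write of a little-endian counter a
     * guest shares at 'gpa', following the kvm_update_stolen_time()
     * pattern. */
    static int bump_guest_counter(struct kvm *kvm, gpa_t gpa, u64 delta)
    {
            u64 val = 0;
            int idx, ret;

            idx = srcu_read_lock(&kvm->srcu);	/* pin the memslots */
            ret = kvm_get_guest(kvm, gpa, val);	/* load, sized by typeof(val) */
            if (!ret) {
                    val = le64_to_cpu(val) + delta;
                    ret = kvm_put_guest(kvm, gpa, cpu_to_le64(val));
            }
            srcu_read_unlock(&kvm->srcu, idx);
            return ret;				/* 0 on success, -EFAULT otherwise */
    }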