Commit 6f390916 authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: s390: Ensure kvm_arch_no_poll() is read once when blocking vCPU

Wrap s390's halt_poll_max_steal with READ_ONCE and snapshot the result of
kvm_arch_no_poll() in kvm_vcpu_block() to avoid a mostly-theoretical,
largely benign bug on s390 where the result of kvm_arch_no_poll() could
change due to userspace modifying halt_poll_max_steal while the vCPU is
blocking.  The bug is largely benign as it will either cause KVM to skip
updating halt-polling times (no_poll toggles false=>true) or to update
halt-polling times with a slightly flawed block_ns.

Note, READ_ONCE is unnecessary in the current code, add it in case the
arch hook is ever inlined, and to provide a hint that userspace can
change the param at will.

Fixes: 8b905d28 ("KVM: s390: provide kvm_arch_no_poll function")
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 91b01895
@@ -3403,7 +3403,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 {
 	/* do not poll with more than halt_poll_max_steal percent of steal time */
 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
-	    halt_poll_max_steal) {
+	    READ_ONCE(halt_poll_max_steal)) {
 		vcpu->stat.halt_no_poll_steal++;
 		return true;
 	}
@@ -3284,6 +3284,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
  */
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
+	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	ktime_t start, cur, poll_end;
 	bool waited = false;
 	u64 block_ns;
@@ -3291,7 +3292,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_blocking(vcpu);
 	start = cur = poll_end = ktime_get();
-	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
+	if (vcpu->halt_poll_ns && halt_poll_allowed) {
 		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

 		++vcpu->stat.generic.halt_attempted_poll;
@@ -3346,7 +3347,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	update_halt_poll_stats(
 		vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);

-	if (!kvm_arch_no_poll(vcpu)) {
+	if (halt_poll_allowed) {
 		if (!vcpu_valid_wakeup(vcpu)) {
 			shrink_halt_poll_ns(vcpu);
 		} else if (vcpu->kvm->max_halt_poll_ns) {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment