Commit 0759d068 authored by David Hildenbrand's avatar David Hildenbrand Committed by Christian Borntraeger

KVM: s390: cleanup handle_wait by reusing kvm_vcpu_block

This patch cleans up the code in handle_wait by reusing the common code
function kvm_vcpu_block.

signal_pending(), kvm_cpu_has_pending_timer() and kvm_arch_vcpu_runnable() are
sufficient for checking if we need to wake up that VCPU. kvm_vcpu_block
uses these functions, so no checks are lost.

The flag "timer_due" can be removed - kvm_cpu_has_pending_timer() tests whether
the timer is pending, thus the vcpu is correctly woken up.
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 6352e4d2
...@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt { ...@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
struct list_head list; struct list_head list;
atomic_t active; atomic_t active;
struct kvm_s390_float_interrupt *float_int; struct kvm_s390_float_interrupt *float_int;
int timer_due; /* event indicator for waitqueue below */
wait_queue_head_t *wq; wait_queue_head_t *wq;
atomic_t *cpuflags; atomic_t *cpuflags;
unsigned int action_bits; unsigned int action_bits;
......
...@@ -585,60 +585,32 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) ...@@ -585,60 +585,32 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{ {
u64 now, sltime; u64 now, sltime;
DECLARE_WAITQUEUE(wait, current);
vcpu->stat.exit_wait_state++; vcpu->stat.exit_wait_state++;
if (kvm_cpu_has_interrupt(vcpu))
return 0;
__set_cpu_idle(vcpu); /* fast path */
spin_lock_bh(&vcpu->arch.local_int.lock); if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
vcpu->arch.local_int.timer_due = 0; return 0;
spin_unlock_bh(&vcpu->arch.local_int.lock);
if (psw_interrupts_disabled(vcpu)) { if (psw_interrupts_disabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
__unset_cpu_idle(vcpu);
return -EOPNOTSUPP; /* disabled wait */ return -EOPNOTSUPP; /* disabled wait */
} }
__set_cpu_idle(vcpu);
if (!ckc_interrupts_enabled(vcpu)) { if (!ckc_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
goto no_timer; goto no_timer;
} }
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
if (vcpu->arch.sie_block->ckc < now) {
__unset_cpu_idle(vcpu);
return 0;
}
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer: no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
spin_lock(&vcpu->arch.local_int.float_int->lock); kvm_vcpu_block(vcpu);
spin_lock_bh(&vcpu->arch.local_int.lock);
add_wait_queue(&vcpu->wq, &wait);
while (list_empty(&vcpu->arch.local_int.list) &&
list_empty(&vcpu->arch.local_int.float_int->list) &&
(!vcpu->arch.local_int.timer_due) &&
!signal_pending(current) &&
!kvm_s390_si_ext_call_pending(vcpu)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&vcpu->arch.local_int.lock);
spin_unlock(&vcpu->arch.local_int.float_int->lock);
schedule();
spin_lock(&vcpu->arch.local_int.float_int->lock);
spin_lock_bh(&vcpu->arch.local_int.lock);
}
__unset_cpu_idle(vcpu); __unset_cpu_idle(vcpu);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&vcpu->wq, &wait);
spin_unlock_bh(&vcpu->arch.local_int.lock);
spin_unlock(&vcpu->arch.local_int.float_int->lock);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
...@@ -649,11 +621,8 @@ void kvm_s390_tasklet(unsigned long parm) ...@@ -649,11 +621,8 @@ void kvm_s390_tasklet(unsigned long parm)
{ {
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
spin_lock(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.timer_due = 1;
if (waitqueue_active(&vcpu->wq)) if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq); wake_up_interruptible(&vcpu->wq);
spin_unlock(&vcpu->arch.local_int.lock);
} }
/* /*
......
...@@ -1068,6 +1068,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -1068,6 +1068,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
goto retry; goto retry;
} }
/* nothing to do, just clear the request */
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment