Commit 91b99ea7 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Rename kvm_vcpu_block() => kvm_vcpu_halt()

Rename kvm_vcpu_block() to kvm_vcpu_halt() in preparation for splitting
the actual "block" sequences into a separate helper (to be named
kvm_vcpu_block()).  x86 will use the standalone block-only path to handle
non-halt cases where the vCPU is not runnable.

Rename block_ns to halt_ns to match the new function name.

No functional change intended.
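
As a rough sketch of the end state this is preparing for (illustrative only;
the actual split lands in a later patch of the series, and poll_for_wakeup()
is a hypothetical placeholder, not a real helper):

	void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
	{
		/*
		 * Halt-poll first: spin briefly in the hope that a wake
		 * event arrives before the vCPU must be scheduled out.  The
		 * polling loop is elided behind a hypothetical helper.
		 */
		if (!poll_for_wakeup(vcpu))
			kvm_vcpu_block(vcpu);	/* future block-only helper */

		/* Halt statistics and halt-poll window sizing stay here. */
	}

Halt emulation keeps calling kvm_vcpu_halt(); x86's non-halt waits can then
use the bare kvm_vcpu_block() without the halt accounting.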
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 005467e0
arch/arm64/kvm/arch_timer.c
@@ -467,7 +467,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
 }
 
 /*
- * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * Schedule the background timer before calling kvm_vcpu_halt, so that this
  * thread is removed from its waitqueue and made runnable when there's a timer
  * interrupt to handle.
  */
arch/arm64/kvm/arm.c
@@ -681,7 +681,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 	vgic_v4_put(vcpu, true);
 	preempt_enable();
 
-	kvm_vcpu_block(vcpu);
+	kvm_vcpu_halt(vcpu);
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
 	preempt_disable();
arch/arm64/kvm/handle_exit.c
@@ -82,7 +82,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
  *
  * WFE: Yield the CPU and come back to this vcpu when the scheduler
  * decides to.
- * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
arch/arm64/kvm/psci.c
@@ -46,7 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
 	 * specification (ARM DEN 0022A). This means all suspend states
 	 * for KVM will preserve the register state.
 	 */
-	kvm_vcpu_block(vcpu);
+	kvm_vcpu_halt(vcpu);
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
 	return PSCI_RET_SUCCESS;
arch/mips/kvm/emulate.c
@@ -952,7 +952,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 	if (!vcpu->arch.pending_exceptions) {
 		kvm_vz_lose_htimer(vcpu);
 		vcpu->arch.wait = 1;
-		kvm_vcpu_block(vcpu);
+		kvm_vcpu_halt(vcpu);
 
 		/*
 		 * We we are runnable, then definitely go off to user space to
arch/powerpc/kvm/book3s_pr.c
@@ -492,7 +492,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 
 	if (msr & MSR_POW) {
 		if (!vcpu->arch.pending_exceptions) {
-			kvm_vcpu_block(vcpu);
+			kvm_vcpu_halt(vcpu);
 			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 			vcpu->stat.generic.halt_wakeup++;
 
arch/powerpc/kvm/book3s_pr_papr.c
@@ -376,7 +376,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 		return kvmppc_h_pr_stuff_tce(vcpu);
 	case H_CEDE:
 		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
-		kvm_vcpu_block(vcpu);
+		kvm_vcpu_halt(vcpu);
 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		vcpu->stat.generic.halt_wakeup++;
 		return EMULATE_DONE;
arch/powerpc/kvm/booke.c
@@ -718,7 +718,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		local_irq_enable();
-		kvm_vcpu_block(vcpu);
+		kvm_vcpu_halt(vcpu);
 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		hard_irq_disable();
 
arch/powerpc/kvm/powerpc.c
@@ -236,7 +236,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		break;
 	case EV_HCALL_TOKEN(EV_IDLE):
 		r = EV_SUCCESS;
-		kvm_vcpu_block(vcpu);
+		kvm_vcpu_halt(vcpu);
 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		break;
 	default:
arch/riscv/kvm/vcpu_exit.c
@@ -146,7 +146,7 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu,
 		vcpu->stat.wfi_exit_stat++;
 		if (!kvm_arch_vcpu_runnable(vcpu)) {
 			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
-			kvm_vcpu_block(vcpu);
+			kvm_vcpu_halt(vcpu);
 			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		}
arch/s390/kvm/interrupt.c
@@ -1335,7 +1335,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-	kvm_vcpu_block(vcpu);
+	kvm_vcpu_halt(vcpu);
 	vcpu->valid_wakeup = false;
 	__unset_cpu_idle(vcpu);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
arch/x86/kvm/x86.c
@@ -8727,6 +8727,13 @@ void kvm_arch_exit(void)
 
 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
+	/*
+	 * The vCPU has halted, e.g. executed HLT.  Update the run state if the
+	 * local APIC is in-kernel, the run loop will detect the non-runnable
+	 * state and halt the vCPU.  Exit to userspace if the local APIC is
+	 * managed by userspace, in which case userspace is responsible for
+	 * handling wake events.
+	 */
 	++vcpu->stat.halt_exits;
 	if (lapic_in_kernel(vcpu)) {
 		vcpu->arch.mp_state = state;
@@ -9999,7 +10006,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	if (!kvm_arch_vcpu_runnable(vcpu) &&
 	    (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
 		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-		kvm_vcpu_block(vcpu);
+		kvm_vcpu_halt(vcpu);
 		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 		if (kvm_x86_ops.post_block)
@@ -10196,7 +10203,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			r = -EINTR;
 			goto out;
 		}
-		kvm_vcpu_block(vcpu);
+		kvm_vcpu_halt(vcpu);
 		if (kvm_apic_accept_events(vcpu) < 0) {
 			r = 0;
 			goto out;
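
For context on the first x86 hunk, the helper being documented is short; at
this point in the series it reads roughly as follows (reconstructed from
arch/x86/kvm/x86.c of this era, with the new comment elided — context only,
not part of the diff):

	static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
	{
		/* ... new comment from the hunk above ... */
		++vcpu->stat.halt_exits;
		if (lapic_in_kernel(vcpu)) {
			vcpu->arch.mp_state = state;
			return 1;
		} else {
			vcpu->run->exit_reason = reason;
			return 0;
		}
	}

That is, with an in-kernel local APIC the halt is absorbed by KVM's run loop
via mp_state, while a userspace local APIC forces an exit so userspace can
wait for the wake event.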
include/linux/kvm_host.h
@@ -1102,7 +1102,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
virt/kvm/kvm_main.c
@@ -3294,17 +3294,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
 	}
 }
 
-/*
- * The vCPU has executed a HLT instruction with in-kernel mode enabled.
- */
-void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
 	ktime_t start, cur, poll_end;
 	bool waited = false;
-	u64 block_ns;
+	u64 halt_ns;
 
 	start = cur = poll_end = ktime_get();
 	if (do_halt_poll) {
@@ -3346,7 +3343,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 			ktime_to_ns(cur) - ktime_to_ns(poll_end));
 	}
 out:
-	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+	/* The total time the vCPU was "halted", including polling time. */
+	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
 	/*
 	 * Note, halt-polling is considered successful so long as the vCPU was
@@ -3360,24 +3358,24 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		if (!vcpu_valid_wakeup(vcpu)) {
 			shrink_halt_poll_ns(vcpu);
 		} else if (vcpu->kvm->max_halt_poll_ns) {
-			if (block_ns <= vcpu->halt_poll_ns)
+			if (halt_ns <= vcpu->halt_poll_ns)
 				;
 			/* we had a long block, shrink polling */
 			else if (vcpu->halt_poll_ns &&
-				 block_ns > vcpu->kvm->max_halt_poll_ns)
+				 halt_ns > vcpu->kvm->max_halt_poll_ns)
 				shrink_halt_poll_ns(vcpu);
 			/* we had a short halt and our poll time is too small */
 			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
-				 block_ns < vcpu->kvm->max_halt_poll_ns)
+				 halt_ns < vcpu->kvm->max_halt_poll_ns)
 				grow_halt_poll_ns(vcpu);
 		} else {
 			vcpu->halt_poll_ns = 0;
 		}
 	}
 
-	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
+	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_block);
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 {
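
As a worked example of the window-sizing logic above, assuming the default
halt-polling module parameters (halt_poll_ns_grow=2, halt_poll_ns_grow_start=10000,
halt_poll_ns_shrink=0, i.e. a shrink resets the window to zero — these defaults
are an assumption) and a max_halt_poll_ns of 200000: with vcpu->halt_poll_ns
at 10000, a wakeup after halt_ns = 8000 falls inside the window and leaves it
unchanged; halt_ns = 300000 exceeds max_halt_poll_ns and shrinks the window to
0; halt_ns = 50000 sits between the two and grows the window to 20000. The
rename makes these comparisons read as "how long was the vCPU halted" rather
than "how long did it block", matching the function's new name.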