Commit a749e247 authored by Paolo Bonzini

KVM: lapic: reorganize restart_apic_timer

Move the code to cancel the hv timer into the caller, just before
it starts the hrtimer.  Check availability of the hv timer in
start_hv_timer.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 35ee9e48
...@@ -1495,17 +1495,21 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); ...@@ -1495,17 +1495,21 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic) static void cancel_hv_timer(struct kvm_lapic *apic)
{ {
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
preempt_disable(); preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu); kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false; apic->lapic_timer.hv_timer_in_use = false;
preempt_enable(); preempt_enable();
} }
static bool __start_hv_timer(struct kvm_lapic *apic) static bool start_hv_timer(struct kvm_lapic *apic)
{ {
struct kvm_timer *ktimer = &apic->lapic_timer; struct kvm_timer *ktimer = &apic->lapic_timer;
int r; int r;
if (!kvm_x86_ops->set_hv_timer)
return false;
if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
return false; return false;
...@@ -1523,19 +1527,30 @@ static bool __start_hv_timer(struct kvm_lapic *apic) ...@@ -1523,19 +1527,30 @@ static bool __start_hv_timer(struct kvm_lapic *apic)
*/ */
if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
return false; return false;
trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
return true; return true;
} }
static bool start_hv_timer(struct kvm_lapic *apic) static void start_sw_timer(struct kvm_lapic *apic)
{ {
if (!__start_hv_timer(apic)) { struct kvm_timer *ktimer = &apic->lapic_timer;
if (apic->lapic_timer.hv_timer_in_use) if (apic->lapic_timer.hv_timer_in_use)
cancel_hv_timer(apic); cancel_hv_timer(apic);
} if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
return;
trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
apic->lapic_timer.hv_timer_in_use); start_sw_period(apic);
return apic->lapic_timer.hv_timer_in_use; else if (apic_lvtt_tscdeadline(apic))
start_sw_tscdeadline(apic);
trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}
static void restart_apic_timer(struct kvm_lapic *apic)
{
if (!start_hv_timer(apic))
start_sw_timer(apic);
} }
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
...@@ -1549,19 +1564,14 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) ...@@ -1549,19 +1564,14 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
if (apic_lvtt_period(apic) && apic->lapic_timer.period) { if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
advance_periodic_target_expiration(apic); advance_periodic_target_expiration(apic);
if (!start_hv_timer(apic)) restart_apic_timer(apic);
start_sw_period(apic);
} }
} }
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu) void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; restart_apic_timer(vcpu->arch.apic);
WARN_ON(apic->lapic_timer.hv_timer_in_use);
start_hv_timer(apic);
} }
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer); EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
...@@ -1570,33 +1580,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu) ...@@ -1570,33 +1580,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
/* Possibly the TSC deadline timer is not enabled yet */ /* Possibly the TSC deadline timer is not enabled yet */
if (!apic->lapic_timer.hv_timer_in_use) if (apic->lapic_timer.hv_timer_in_use)
return; start_sw_timer(apic);
}
cancel_hv_timer(apic); EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
if (atomic_read(&apic->lapic_timer.pending)) void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
return; {
struct kvm_lapic *apic = vcpu->arch.apic;
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) WARN_ON(!apic->lapic_timer.hv_timer_in_use);
start_sw_period(apic); restart_apic_timer(apic);
else if (apic_lvtt_tscdeadline(apic))
start_sw_tscdeadline(apic);
} }
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
static void start_apic_timer(struct kvm_lapic *apic) static void start_apic_timer(struct kvm_lapic *apic)
{ {
atomic_set(&apic->lapic_timer.pending, 0); atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
if (set_target_expiration(apic) && && !set_target_expiration(apic))
!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic))) return;
start_sw_period(apic);
} else if (apic_lvtt_tscdeadline(apic)) { restart_apic_timer(apic);
if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
start_sw_tscdeadline(apic);
}
} }
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
...@@ -1827,16 +1832,6 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu) ...@@ -1827,16 +1832,6 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
* LAPIC interface * LAPIC interface
*---------------------------------------------------------------------- *----------------------------------------------------------------------
*/ */
u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!lapic_in_kernel(vcpu))
return 0;
return apic->lapic_timer.tscdeadline;
}
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
......
...@@ -87,7 +87,6 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); ...@@ -87,7 +87,6 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
...@@ -216,4 +215,5 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu); ...@@ -216,4 +215,5 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu); void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu); void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu); bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
#endif #endif
...@@ -2841,10 +2841,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -2841,10 +2841,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_vcpu_write_tsc_offset(vcpu, offset); kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1; vcpu->arch.tsc_catchup = 1;
} }
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu, if (kvm_lapic_hv_timer_in_use(vcpu))
kvm_get_lapic_target_expiration_tsc(vcpu))) kvm_lapic_restart_hv_timer(vcpu);
kvm_lapic_switch_to_sw_timer(vcpu);
/* /*
* On a host with synchronized TSC, there is no need to update * On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration * kvmclock on vcpu->cpu migration
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment