Commit fb280e97 authored by Jintack Lim, committed by Marc Zyngier

KVM: arm/arm64: Set a background timer to the earliest timer expiration

When scheduling a background timer, consider both the virtual and
physical timers and pick the earliest expiration time.
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 58e0c973
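
For illustration only, here is a minimal user-space sketch of the selection logic this patch introduces (not kernel code: timer_ctx, timer_can_fire(), compute_delta_ns() and earliest_exp_ns() are hypothetical stand-ins for arch_timer_context, kvm_timer_irq_can_fire(), kvm_timer_compute_delta() and kvm_timer_earliest_exp()). A timer that cannot raise an interrupt is ignored, the smallest remaining delta wins, and a return value of 0 means there is nothing to arm.

/*
 * Standalone sketch of the earliest-expiration selection added by this
 * commit.  All types and helpers are hypothetical stand-ins, not the
 * kernel's arch_timer code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DELTA_MAX UINT64_MAX

struct timer_ctx {
	bool enabled;		/* enabled and not masked */
	uint64_t cval;		/* absolute compare value, in ns for this sketch */
};

/* Can this timer raise an interrupt at all? */
static bool timer_can_fire(const struct timer_ctx *t)
{
	return t->enabled;
}

/* Nanoseconds until this timer fires, or 0 if it has already expired. */
static uint64_t compute_delta_ns(const struct timer_ctx *t, uint64_t now)
{
	return t->cval > now ? t->cval - now : 0;
}

/*
 * Earliest expiration among the two guest timers; returns 0 if neither
 * timer can fire (mirrors the shape of kvm_timer_earliest_exp()).
 */
static uint64_t earliest_exp_ns(const struct timer_ctx *vtimer,
				const struct timer_ctx *ptimer, uint64_t now)
{
	uint64_t min_virt = DELTA_MAX, min_phys = DELTA_MAX;

	if (timer_can_fire(vtimer))
		min_virt = compute_delta_ns(vtimer, now);
	if (timer_can_fire(ptimer))
		min_phys = compute_delta_ns(ptimer, now);

	if (min_virt == DELTA_MAX && min_phys == DELTA_MAX)
		return 0;

	return min_virt < min_phys ? min_virt : min_phys;
}

int main(void)
{
	struct timer_ctx vtimer = { .enabled = true,  .cval = 500 };
	struct timer_ctx ptimer = { .enabled = true,  .cval = 300 };
	struct timer_ctx off    = { .enabled = false, .cval = 100 };

	/* Physical timer fires first: arm the background timer for 200 ns. */
	printf("%llu\n", (unsigned long long)earliest_exp_ns(&vtimer, &ptimer, 100));
	/* Only the virtual timer can fire: 400 ns. */
	printf("%llu\n", (unsigned long long)earliest_exp_ns(&vtimer, &off, 100));
	/* Neither timer can fire: 0, nothing to arm. */
	printf("%llu\n", (unsigned long long)earliest_exp_ns(&off, &off, 100));
	return 0;
}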
@@ -300,7 +300,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return kvm_timer_should_fire(vcpu_vtimer(vcpu));
+	return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
+	       kvm_timer_should_fire(vcpu_ptimer(vcpu));
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -118,6 +118,35 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 	return 0;
 }
 
+static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+{
+	return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+}
+
+/*
+ * Returns the earliest expiration time in ns among guest timers.
+ * Note that it will return 0 if none of timers can fire.
+ */
+static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
+{
+	u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+
+	if (kvm_timer_irq_can_fire(vtimer))
+		min_virt = kvm_timer_compute_delta(vtimer);
+
+	if (kvm_timer_irq_can_fire(ptimer))
+		min_phys = kvm_timer_compute_delta(ptimer);
+
+	/* If none of timers can fire, then return 0 */
+	if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
+		return 0;
+
+	return min(min_virt, min_phys);
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_cpu *timer;
@@ -132,7 +161,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 	 * PoV (NTP on the host may have forced it to expire
 	 * early). If we should have slept longer, restart it.
 	 */
-	ns = kvm_timer_compute_delta(vcpu_vtimer(vcpu));
+	ns = kvm_timer_earliest_exp(vcpu);
 	if (unlikely(ns)) {
 		hrtimer_forward_now(hrt, ns_to_ktime(ns));
 		return HRTIMER_RESTART;
@@ -142,12 +171,6 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 	return HRTIMER_NORESTART;
 }
 
-static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
-{
-	return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
-}
-
 bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 {
 	u64 cval, now;
@@ -215,26 +238,30 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	BUG_ON(timer_is_armed(timer));
 
 	/*
-	 * No need to schedule a background timer if the guest timer has
+	 * No need to schedule a background timer if any guest timer has
 	 * already expired, because kvm_vcpu_block will return before putting
 	 * the thread to sleep.
 	 */
-	if (kvm_timer_should_fire(vtimer))
+	if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
 		return;
 
 	/*
-	 * If the timer is not capable of raising interrupts (disabled or
+	 * If both timers are not capable of raising interrupts (disabled or
 	 * masked), then there's no more work for us to do.
 	 */
-	if (!kvm_timer_irq_can_fire(vtimer))
+	if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
 		return;
 
-	/* The timer has not yet expired, schedule a background timer */
-	timer_arm(timer, kvm_timer_compute_delta(vtimer));
+	/*
+	 * The guest timers have not yet expired, schedule a background timer.
+	 * Set the earliest expiration time among the guest timers.
+	 */
+	timer_arm(timer, kvm_timer_earliest_exp(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)