Commit 61bbe380 authored by Christoffer Dall's avatar Christoffer Dall

KVM: arm/arm64: Avoid work when userspace irqchips are not used

We currently check if the VM has a userspace irqchip in several places
along the critical path, and if so, we do some work which is only
required for having an irqchip in userspace.  This is unfortunate, as we
could avoid doing any work entirely, if we didn't have to support
irqchip in userspace.

Realizing the userspace irqchip on ARM is mostly a developer or hobby
feature, and is unlikely to be used in servers or other scenarios where
performance is a priority, we can use a refcounted static key to only
check the irqchip configuration when we have at least one VM that uses
an irqchip in userspace.
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 4c60e360
...@@ -48,6 +48,8 @@ ...@@ -48,6 +48,8 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void); int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
......
...@@ -47,6 +47,8 @@ ...@@ -47,6 +47,8 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
int __attribute_const__ kvm_target_cpu(void); int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext); int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
......
...@@ -103,7 +103,8 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) ...@@ -103,7 +103,8 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
if (kvm_timer_irq_can_fire(vtimer)) if (kvm_timer_irq_can_fire(vtimer))
kvm_timer_update_irq(vcpu, true, vtimer); kvm_timer_update_irq(vcpu, true, vtimer);
if (unlikely(!irqchip_in_kernel(vcpu->kvm))) if (static_branch_unlikely(&userspace_irqchip_in_use) &&
unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm_vtimer_update_mask_user(vcpu); kvm_vtimer_update_mask_user(vcpu);
return IRQ_HANDLED; return IRQ_HANDLED;
...@@ -284,7 +285,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, ...@@ -284,7 +285,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
timer_ctx->irq.level); timer_ctx->irq.level);
if (likely(irqchip_in_kernel(vcpu->kvm))) { if (!static_branch_unlikely(&userspace_irqchip_in_use) &&
likely(irqchip_in_kernel(vcpu->kvm))) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
timer_ctx->irq.irq, timer_ctx->irq.irq,
timer_ctx->irq.level, timer_ctx->irq.level,
......
...@@ -74,6 +74,8 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) ...@@ -74,6 +74,8 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
__this_cpu_write(kvm_arm_running_vcpu, vcpu); __this_cpu_write(kvm_arm_running_vcpu, vcpu);
} }
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
/** /**
* kvm_arm_get_running_vcpu - get the vcpu running on the current CPU. * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
* Must be called from non-preemptible context * Must be called from non-preemptible context
...@@ -302,6 +304,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) ...@@ -302,6 +304,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{ {
if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use);
kvm_arch_vcpu_free(vcpu); kvm_arch_vcpu_free(vcpu);
} }
...@@ -522,15 +526,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ...@@ -522,15 +526,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true; vcpu->arch.has_run_once = true;
if (likely(irqchip_in_kernel(kvm))) {
/* /*
* Map the VGIC hardware resources before running a vcpu the first * Map the VGIC hardware resources before running a vcpu the
* time on this VM. * first time on this VM.
*/ */
if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) { if (unlikely(!vgic_ready(kvm))) {
ret = kvm_vgic_map_resources(kvm); ret = kvm_vgic_map_resources(kvm);
if (ret) if (ret)
return ret; return ret;
} }
} else {
/*
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
static_branch_inc(&userspace_irqchip_in_use);
}
ret = kvm_timer_enable(vcpu); ret = kvm_timer_enable(vcpu);
if (ret) if (ret)
...@@ -664,17 +676,28 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -664,17 +676,28 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vgic_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu);
/* /*
* If we have a singal pending, or need to notify a userspace * Exit if we have a signal pending so that we can deliver the
* irqchip about timer or PMU level changes, then we exit (and * signal to user space.
* update the timer level state in kvm_timer_update_run */
* below). if (signal_pending(current)) {
ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
}
/*
* If we're using a userspace irqchip, then check if we need
* to tell a userspace irqchip about timer or PMU level
* changes and if so, exit to userspace (the actual level
* state gets updated in kvm_timer_update_run and
* kvm_pmu_update_run below).
*/ */
if (signal_pending(current) || if (static_branch_unlikely(&userspace_irqchip_in_use)) {
kvm_timer_should_notify_user(vcpu) || if (kvm_timer_should_notify_user(vcpu) ||
kvm_pmu_should_notify_user(vcpu)) { kvm_pmu_should_notify_user(vcpu)) {
ret = -EINTR; ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR; run->exit_reason = KVM_EXIT_INTR;
} }
}
/* /*
* Ensure we set mode to IN_GUEST_MODE after we disable * Ensure we set mode to IN_GUEST_MODE after we disable
...@@ -688,6 +711,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -688,6 +711,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_request_pending(vcpu)) { kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->mode = OUTSIDE_GUEST_MODE;
kvm_pmu_sync_hwstate(vcpu); kvm_pmu_sync_hwstate(vcpu);
if (static_branch_unlikely(&userspace_irqchip_in_use))
kvm_timer_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu);
local_irq_enable(); local_irq_enable();
...@@ -732,6 +756,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -732,6 +756,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* we don't want vtimer interrupts to race with syncing the * we don't want vtimer interrupts to race with syncing the
* timer virtual interrupt state. * timer virtual interrupt state.
*/ */
if (static_branch_unlikely(&userspace_irqchip_in_use))
kvm_timer_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment