Commit 53692908 authored by Marc Zyngier

KVM: arm/arm64: vgic: Fix source vcpu issues for GICv2 SGI

Now that we make sure we don't inject multiple instances of the
same GICv2 SGI at the same time, we've made another bug more
obvious:

If we exit with an active SGI, we completely lose track of which
vcpu it came from. On the next entry, we restore it with 0 as a
source, and if that wasn't the right one, too bad. While this
doesn't seem to trouble GIC-400, the architectural model gets
offended and doesn't deactivate the interrupt on EOI.

Another connected issue is that we will happily mark as pending
an interrupt from another vcpu, overriding the above zero with
something that is just as inconsistent. Don't do that.

The final issue is that we signal a maintenance interrupt when
no pending interrupts are present in the LR. Assuming we've fixed
the two issues above, we end up in a situation where we keep
exiting as soon as we've reached the active state, without being
able to inject the following pending interrupt.

The fix comes in 3 parts:
- GICv2 SGIs have their source vcpu saved if they are active on
  exit, and restored on entry
- Multi-SGIs cannot go via the Pending+Active state, as this would
  corrupt the source field
- Multi-SGIs are converted to using a maintenance interrupt on EOI
  instead of NPIE

Fixes: 16ca6a60 ("KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid")
Reported-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 85bd0ba1
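
The core of the fix, visible in the diff below, is that an active GICv2 SGI now carries its source vCPU id in the CPUID field of the list register (LR), both when the LR is populated on entry and when it is folded back on exit. The following standalone sketch (not kernel code) illustrates that round trip: the constants follow the GICv2 GICH_LR layout used by the patch (virtual ID in bits [9:0], CPUID field starting at bit 10, active state in bit 29), and the two helper functions are hypothetical, made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* GICv2 GICH_LR field layout, as used by the patch. */
#define GICH_LR_VIRTUALID               0x3ff           /* bits [9:0] */
#define GICH_LR_PHYSID_CPUID_SHIFT      10              /* CPUID at [12:10] for SGIs */
#define GICH_LR_ACTIVE_BIT              (1u << 29)

/* Hypothetical helper: pack an active SGI plus its source vCPU into an LR,
 * mirroring what vgic_v2_populate_lr() now does. */
static uint32_t pack_active_sgi(uint32_t intid, uint8_t active_source)
{
        uint32_t val = intid & GICH_LR_VIRTUALID;

        val |= GICH_LR_ACTIVE_BIT;
        val |= (uint32_t)active_source << GICH_LR_PHYSID_CPUID_SHIFT;
        return val;
}

/* Hypothetical helper: recover the source vCPU on exit, mirroring
 * vgic_v2_fold_lr_state(). Only 3 bits are meaningful for an SGI. */
static uint8_t unpack_source(uint32_t val)
{
        return (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 7;
}

int main(void)
{
        uint32_t lr = pack_active_sgi(3, 5);    /* SGI 3 sent by vCPU 5 */

        printf("LR = %#x, source vCPU = %u\n",
               (unsigned int)lr, unpack_source(lr));
        return 0;
}
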
@@ -131,6 +131,7 @@ struct vgic_irq {
 		u32 mpidr;		/* GICv3 target VCPU */
 	};
 	u8 source;			/* GICv2 SGIs only */
+	u8 active_source;		/* GICv2 SGIs only */
 	u8 priority;
 	enum vgic_irq_config config;	/* Level or edge */
...
@@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	    irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
 
-	if (irq->hw)
+	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
-	else
+	} else {
+		u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
 		irq->active = active;
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+		    active && vgic_irq_is_sgi(irq->intid))
+			irq->active_source = requester_vcpu->vcpu_id;
+	}
+
 	if (irq->active)
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
...
@@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void)
 		vgic_v2_write_lr(i, 0);
 }
 
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-	cpuif->vgic_hcr |= GICH_HCR_NPIE;
-}
-
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
+	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
-		u32 intid = val & GICH_LR_VIRTUALID;
+		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
 		struct vgic_irq *irq;
 
+		/* Extract the source vCPU id from the LR */
+		cpuid = val & GICH_LR_PHYSID_CPUID;
+		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+		cpuid &= 7;
+
 		/* Notify fds when the guest EOI'ed a level-triggered SPI */
 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
 			kvm_notify_acked_irq(vcpu->kvm, 0,
@@ -90,18 +88,17 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
+		if (irq->active && vgic_irq_is_sgi(intid))
+			irq->active_source = cpuid;
 
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & GICH_LR_PENDING_BIT)) {
 			irq->pending_latch = true;
 
-			if (vgic_irq_is_sgi(intid)) {
-				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+			if (vgic_irq_is_sgi(intid))
 				irq->source |= (1 << cpuid);
-			}
 		}
 
 		/*
		 * Clear soft pending state when level irqs have been acked.
@@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	u32 val = irq->intid;
 	bool allow_pending = true;
 
-	if (irq->active)
+	if (irq->active) {
 		val |= GICH_LR_ACTIVE_BIT;
+		if (vgic_irq_is_sgi(irq->intid))
+			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+		if (vgic_irq_is_multi_sgi(irq)) {
+			allow_pending = false;
+			val |= GICH_LR_EOI;
+		}
+	}
 
 	if (irq->hw) {
 		val |= GICH_LR_HW;
@@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 		BUG_ON(!src);
 		val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 		irq->source &= ~(1 << (src - 1));
-		if (irq->source)
+		if (irq->source) {
 			irq->pending_latch = true;
+			val |= GICH_LR_EOI;
+		}
 	}
 }
...
@@ -27,13 +27,6 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-	cpuif->vgic_hcr |= ICH_HCR_NPIE;
-}
-
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
+	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
-		u32 intid;
+		u32 intid, cpuid;
 		struct vgic_irq *irq;
+		bool is_v2_sgi = false;
 
-		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		cpuid = val & GICH_LR_PHYSID_CPUID;
+		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
 			intid = val & ICH_LR_VIRTUAL_ID_MASK;
-		else
+		} else {
 			intid = val & GICH_LR_VIRTUALID;
+			is_v2_sgi = vgic_irq_is_sgi(intid);
+		}
 
 		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
@@ -81,19 +80,17 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+		if (irq->active && is_v2_sgi)
+			irq->active_source = cpuid;
 
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & ICH_LR_PENDING_BIT)) {
 			irq->pending_latch = true;
 
-			if (vgic_irq_is_sgi(intid) &&
-			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
-				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+			if (is_v2_sgi)
 				irq->source |= (1 << cpuid);
-			}
 		}
 
 		/*
		 * Clear soft pending state when level irqs have been acked.
@@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 {
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u64 val = irq->intid;
-	bool allow_pending = true;
+	bool allow_pending = true, is_v2_sgi;
 
-	if (irq->active)
+	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
+		     model == KVM_DEV_TYPE_ARM_VGIC_V2);
+
+	if (irq->active) {
 		val |= ICH_LR_ACTIVE_BIT;
+		if (is_v2_sgi)
+			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+		if (vgic_irq_is_multi_sgi(irq)) {
+			allow_pending = false;
+			val |= ICH_LR_EOI;
+		}
+	}
 
 	if (irq->hw) {
 		val |= ICH_LR_HW;
@@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 		BUG_ON(!src);
 		val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 		irq->source &= ~(1 << (src - 1));
-		if (irq->source)
+		if (irq->source) {
 			irq->pending_latch = true;
+			val |= ICH_LR_EOI;
+		}
 	}
 }
...
@@ -719,14 +719,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 		vgic_v3_set_underflow(vcpu);
 }
 
-static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
-{
-	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_set_npie(vcpu);
-	else
-		vgic_v3_set_npie(vcpu);
-}
-
 /* Requires the ap_list_lock to be held. */
 static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 				 bool *multi_sgi)
@@ -740,17 +732,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		int w;
+
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
-			int w = hweight8(irq->source);
-
-			count += w;
-			*multi_sgi |= (w > 1);
-		} else {
-			count++;
-		}
-		spin_unlock(&irq->irq_lock);
+		w = vgic_irq_get_lr_count(irq);
+		spin_unlock(&irq->irq_lock);
+
+		count += w;
+		*multi_sgi |= (w > 1);
 	}
 
 	return count;
 }
@@ -761,7 +751,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count;
-	bool npie = false;
 	bool multi_sgi;
 	u8 prio = 0xff;
@@ -791,11 +780,9 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 
 		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
 
-			if (irq->source) {
-				npie = true;
+			if (irq->source)
 				prio = irq->priority;
-			}
 		}
 
 		spin_unlock(&irq->irq_lock);
@@ -807,9 +794,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (npie)
-		vgic_set_npie(vcpu);
-
 	vcpu->arch.vgic_cpu.used_lrs = count;
 
 	/* Nuke remaining LRs */
...
@@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
 	return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
 }
 
+static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
+{
+	/* Account for the active state as an interrupt */
+	if (vgic_irq_is_sgi(irq->intid) && irq->source)
+		return hweight8(irq->source) + irq->active;
+
+	return irq_is_pending(irq) || irq->active;
+}
+
+static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
+{
+	return vgic_irq_get_lr_count(irq) > 1;
+}
+
 /*
  * This struct provides an intermediate representation of the fields contained
  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
...
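
For reference, here is a minimal user-space sketch of the LR accounting that the new vgic_irq_get_lr_count()/vgic_irq_is_multi_sgi() helpers perform: a GICv2 SGI pending from several vCPUs needs one LR per pending source, plus one more if an instance is currently active, and anything needing more than one LR is treated as a multi-SGI. The struct below is a hypothetical stand-in for struct vgic_irq, and SGIs are assumed to be intids below 16; this is an illustration only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct fake_irq {
        unsigned int intid;
        unsigned char source;   /* bitmap of requesting vCPUs (GICv2 SGIs) */
        bool active;
        bool pending;
};

/* Count set bits in the 8-bit source bitmap (stand-in for hweight8()). */
static int popcount8(unsigned char v)
{
        int n = 0;

        for (; v; v &= v - 1)
                n++;
        return n;
}

/* Mirrors the logic of vgic_irq_get_lr_count() for SGIs (intid < 16). */
static int lr_count(const struct fake_irq *irq)
{
        if (irq->intid < 16 && irq->source)
                return popcount8(irq->source) + irq->active;
        return irq->pending || irq->active;
}

int main(void)
{
        /* SGI 1: pending from vCPUs 0 and 2, and currently active. */
        struct fake_irq sgi = { .intid = 1, .source = 0x05, .active = true };

        printf("LRs needed: %d, multi-SGI: %s\n",
               lr_count(&sgi), lr_count(&sgi) > 1 ? "yes" : "no");
        return 0;
}
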