Commit e08d8d29 authored by Julien Thierry, committed by Christoffer Dall

KVM: arm/arm64: vgic: Make vgic_cpu->ap_list_lock a raw_spinlock

vgic_cpu->ap_list_lock must always be taken with interrupts disabled as
it is used in interrupt context.

For configurations such as PREEMPT_RT_FULL, this means it should be a
raw_spinlock, since RT spinlocks become sleeping (preemptible) locks and
therefore cannot be taken in interrupt context.
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
parent fc3bc475
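
For readers less familiar with PREEMPT_RT: on an RT kernel a plain spinlock_t
becomes a sleeping lock, so it cannot protect data that is also touched from
hard interrupt context; raw_spinlock_t keeps true spinning semantics. Below is
a minimal, self-contained sketch of the pattern this patch enforces — it is
illustrative only, the demo_* names are made up and not part of the patch:

	/*
	 * Sketch (hypothetical demo_* names): a lock that is taken from hard
	 * interrupt context stays a real spinning lock even on PREEMPT_RT, so
	 * it is declared raw_spinlock_t and taken with raw_spin_lock_*().
	 */
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct demo_cpu {
		raw_spinlock_t   ap_list_lock;   /* protects ap_list_head */
		struct list_head ap_list_head;
	};

	static void demo_cpu_init(struct demo_cpu *dc)
	{
		INIT_LIST_HEAD(&dc->ap_list_head);
		raw_spin_lock_init(&dc->ap_list_lock);
	}

	/* May be called from an ISR, so the outer lock uses _irqsave. */
	static void demo_queue(struct demo_cpu *dc, struct list_head *entry)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&dc->ap_list_lock, flags);
		list_add_tail(entry, &dc->ap_list_head);
		raw_spin_unlock_irqrestore(&dc->ap_list_lock, flags);
	}

This mirrors the change below: the struct field, the init call and every
lock/unlock site are switched to their raw_* counterparts.
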
@@ -307,7 +307,7 @@ struct vgic_cpu {
 	unsigned int used_lrs;
 	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
-	spinlock_t ap_list_lock;	/* Protects the ap_list */
+	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */
 
 	/*
 	 * List of IRQs that this VCPU should consider because they are either
...
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-	spin_lock_init(&vgic_cpu->ap_list_lock);
+	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
 	/*
 	 * Enable and configure all SGIs to be edge-triggered and
...
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  *
  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
  * spinlocks for any lock that may be taken while injecting an interrupt.
  */
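
The comment above documents the ordering rule for taking two VCPUs'
ap_list_locks; the migration path in vgic_prune_ap_list() further down follows
it exactly. As a sketch of that rule (the helper name is hypothetical, the
fields kvm_vcpu.vcpu_id and arch.vgic_cpu.ap_list_lock are taken from the diff,
arm KVM headers assumed), locking two ap_list_locks would look like:

	/*
	 * Illustration only, not part of the patch: take the lower-numbered
	 * VCPU's ap_list_lock first and annotate the second acquisition for
	 * lockdep with SINGLE_DEPTH_NESTING. Callers already run with
	 * interrupts disabled, as the DEBUG_SPINLOCK_BUG_ON() checks below
	 * assert, so the plain raw_spin_lock() variants suffice here.
	 */
	static void demo_lock_two_ap_lists(struct kvm_vcpu *vcpuX,
					   struct kvm_vcpu *vcpuY)
	{
		struct kvm_vcpu *first = vcpuX, *second = vcpuY;

		if (vcpuY->vcpu_id < vcpuX->vcpu_id) {
			first = vcpuY;
			second = vcpuX;
		}

		raw_spin_lock(&first->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
				     SINGLE_DEPTH_NESTING);
	}
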
@@ -351,7 +351,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 
 	/* someone can do stuff here, which we re-check below */
 
-	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 	raw_spin_lock(&irq->irq_lock);
 
 	/*
@@ -368,7 +368,8 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 
 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+					   flags);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		goto retry;
@@ -383,7 +384,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 	irq->vcpu = vcpu;
 
 	raw_spin_unlock(&irq->irq_lock);
-	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -597,7 +598,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-	spin_lock(&vgic_cpu->ap_list_lock);
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -638,7 +639,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 		/* This interrupt looks like it has to be migrated. */
 
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock(&vgic_cpu->ap_list_lock);
+		raw_spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -652,9 +653,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 			vcpuB = vcpu;
 		}
 
-		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-				 SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				      SINGLE_DEPTH_NESTING);
 		raw_spin_lock(&irq->irq_lock);
 
 		/*
@@ -676,8 +677,8 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 		}
 
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
 		if (target_vcpu_needs_kick) {
 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
-	spin_unlock(&vgic_cpu->ap_list_lock);
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 	vgic_flush_lr_state(vcpu);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
@@ -918,7 +919,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
 	vgic_get_vmcr(vcpu, &vmcr);
 
-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		raw_spin_lock(&irq->irq_lock);
@@ -931,7 +932,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 			break;
 	}
 
-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
 	return pending;
 }
...