Commit fc3bc475 authored by Julien Thierry, committed by Christoffer Dall

KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock

vgic_dist->lpi_list_lock must always be taken with interrupts disabled as
it is used in interrupt context.

For configurations such as PREEMPT_RT_FULL, this means it should be a
raw_spinlock: RT turns ordinary spinlocks into sleeping locks, which cannot
be taken with interrupts disabled or from interrupt context.
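
For context, a minimal sketch (not part of the patch) of the locking pattern the conversion moves to. The names demo_dist, demo_init and demo_add are hypothetical stand-ins; the primitives (raw_spinlock_t, raw_spin_lock_init(), raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore()) are the kernel APIs the patch switches to, and they remain true spinning locks even on PREEMPT_RT:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical stand-in mirroring the vgic_dist fields touched by this patch. */
struct demo_dist {
	raw_spinlock_t		lpi_list_lock;	/* stays a spinning lock on RT */
	struct list_head	lpi_list_head;
	int			lpi_list_count;
};

static void demo_init(struct demo_dist *d)
{
	INIT_LIST_HEAD(&d->lpi_list_head);
	raw_spin_lock_init(&d->lpi_list_lock);
}

static void demo_add(struct demo_dist *d, struct list_head *entry)
{
	unsigned long flags;

	/* Local IRQs are saved and disabled while the lock is held, so the
	 * lock can also be taken from interrupt context without deadlocking. */
	raw_spin_lock_irqsave(&d->lpi_list_lock, flags);
	list_add_tail(entry, &d->lpi_list_head);
	d->lpi_list_count++;
	raw_spin_unlock_irqrestore(&d->lpi_list_lock, flags);
}

The irqsave variant is what allows the same lock to be taken both in thread context and in interrupt context on the same CPU, which is exactly the usage the commit message describes.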
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
parent 8fa3adb8
@@ -256,7 +256,7 @@ struct vgic_dist {
 	u64			propbaser;
 	/* Protects the lpi_list and the count value below. */
-	spinlock_t		lpi_list_lock;
+	raw_spinlock_t		lpi_list_lock;
 	struct list_head	lpi_list_head;
 	int			lpi_list_count;
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	INIT_LIST_HEAD(&dist->lpi_list_head);
-	spin_lock_init(&dist->lpi_list_lock);
+	raw_spin_lock_init(&dist->lpi_list_lock);
 }
 /* CREATION */
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 	*intid_ptr = intids;
 	return i;
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	struct vgic_irq *irq = NULL;
 	unsigned long flags;
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	irq = NULL;
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 	return irq;
 }
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 	kfree(irq);
 }