Commit 6ce18e3a authored by Marc Zyngier's avatar Marc Zyngier Committed by Christoffer Dall

KVM: arm/arm64: GICv4: Handle INVALL applied to a vPE

There is no need to perform an INV for each interrupt when updating
multiple interrupts.  Instead, we can rely on the final VINVALL that
gets sent to the ITS to do the work for all of them.
Acked-by: Christoffer Dall <cdall@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent af340f99
...@@ -38,7 +38,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its); ...@@ -38,7 +38,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its); static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its); static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
struct kvm_vcpu *filter_vcpu); struct kvm_vcpu *filter_vcpu, bool needs_inv);
/* /*
* Creates a new (reference to a) struct vgic_irq for a given LPI. * Creates a new (reference to a) struct vgic_irq for a given LPI.
...@@ -106,7 +106,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, ...@@ -106,7 +106,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
* However we only have those structs for mapped IRQs, so we read in * However we only have those structs for mapped IRQs, so we read in
* the respective config data from memory here upon mapping the LPI. * the respective config data from memory here upon mapping the LPI.
*/ */
ret = update_lpi_config(kvm, irq, NULL); ret = update_lpi_config(kvm, irq, NULL, false);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
...@@ -273,7 +273,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id) ...@@ -273,7 +273,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
* VCPU. Unconditionally applies if filter_vcpu is NULL. * VCPU. Unconditionally applies if filter_vcpu is NULL.
*/ */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
struct kvm_vcpu *filter_vcpu) struct kvm_vcpu *filter_vcpu, bool needs_inv)
{ {
u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
u8 prop; u8 prop;
...@@ -298,7 +298,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, ...@@ -298,7 +298,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
} }
if (irq->hw) if (irq->hw)
return its_prop_update_vlpi(irq->host_irq, prop, true); return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
return 0; return 0;
} }
...@@ -1117,7 +1117,7 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its, ...@@ -1117,7 +1117,7 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
if (!ite) if (!ite)
return E_ITS_INV_UNMAPPED_INTERRUPT; return E_ITS_INV_UNMAPPED_INTERRUPT;
return update_lpi_config(kvm, ite->irq, NULL); return update_lpi_config(kvm, ite->irq, NULL, true);
} }
/* /*
...@@ -1152,12 +1152,15 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its, ...@@ -1152,12 +1152,15 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
irq = vgic_get_irq(kvm, NULL, intids[i]); irq = vgic_get_irq(kvm, NULL, intids[i]);
if (!irq) if (!irq)
continue; continue;
update_lpi_config(kvm, irq, vcpu); update_lpi_config(kvm, irq, vcpu, false);
vgic_put_irq(kvm, irq); vgic_put_irq(kvm, irq);
} }
kfree(intids); kfree(intids);
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment