Commit cc98702c authored by Marc Zyngier

Merge branch 'kvm-arm64/gic-v4.1' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 76a5db10 dab4fe3b
@@ -39,6 +39,7 @@
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
......
@@ -89,7 +89,8 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
 static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
-	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
+	    vcpu->kvm->arch.vgic.nassgireq)
 		vcpu->arch.hcr_el2 &= ~HCR_TWI;
 	else
 		vcpu->arch.hcr_el2 |= HCR_TWI;
......
@@ -44,6 +44,7 @@
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
......
This diff is collapsed.
@@ -723,6 +723,7 @@ static void __init gic_dist_init(void)
 	unsigned int i;
 	u64 affinity;
 	void __iomem *base = gic_data.dist_base;
+	u32 val;

 	/* Disable the distributor */
 	writel_relaxed(0, base + GICD_CTLR);
@@ -755,9 +756,14 @@ static void __init gic_dist_init(void)
 	/* Now do the common stuff, and wait for the distributor to drain */
 	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

+	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+		pr_info("Enabling SGIs without active state\n");
+		val |= GICD_CTLR_nASSGIreq;
+	}
+
 	/* Enable distributor with ARE, Group1 */
-	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
-		       base + GICD_CTLR);
+	writel_relaxed(val, base + GICD_CTLR);

 	/*
 	 * Set all global interrupts to the boot CPU only. ARE must be
@@ -828,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	typer = gic_read_typer(ptr + GICR_TYPER);
 	if ((typer >> 32) == aff) {
 		u64 offset = ptr - region->redist_base;
+		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
 		gic_data_rdist_rd_base() = ptr;
 		gic_data_rdist()->phys_base = region->phys_base + offset;
@@ -1757,6 +1764,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 	gic_v3_kvm_info.vcpu = r;

 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
@@ -2072,6 +2080,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 	}

 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
......
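
The distributor changes above hold the whole GICv4.1 negotiation: GICD_TYPER2 advertises the capability, GICD_CTLR.nASSGIreq opts in. For illustration only, a standalone probe might look like the sketch below; it assumes `base` is the distributor mapping this driver already holds and that <linux/bitfield.h> is available:

	u32 typer2 = readl_relaxed(base + GICD_TYPER2);

	if (typer2 & GICD_TYPER2_VIL)
		pr_info("vPEIDs are %lu bits\n",
			(unsigned long)FIELD_GET(GICD_TYPER2_VID, typer2) + 1);
	if (typer2 & GICD_TYPER2_nASSGIcap)
		pr_info("SGIs can be configured without an active state\n");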
@@ -85,6 +85,53 @@

 static struct irq_domain *gic_domain;
 static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+	return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+	char *name;
+	int sgi_base;
+
+	if (!has_v4_1())
+		return 0;
+
+	name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+	if (!name)
+		goto err;
+
+	vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+	if (!vpe->fwnode)
+		goto err;
+
+	kfree(name);
+	name = NULL;
+
+	vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+						   sgi_domain_ops, vpe);
+	if (!vpe->sgi_domain)
+		goto err;
+
+	sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+					   NUMA_NO_NODE, vpe,
+					   false, NULL);
+	if (sgi_base <= 0)
+		goto err;
+
+	return 0;
+
+err:
+	if (vpe->sgi_domain)
+		irq_domain_remove(vpe->sgi_domain);
+	if (vpe->fwnode)
+		irq_domain_free_fwnode(vpe->fwnode);
+	kfree(name);
+	return -ENOMEM;
+}

 int its_alloc_vcpu_irqs(struct its_vm *vm)
 {
@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	if (vpe_base_irq <= 0)
 		goto err;

-	for (i = 0; i < vm->nr_vpes; i++)
+	for (i = 0; i < vm->nr_vpes; i++) {
+		int ret;
 		vm->vpes[i]->irq = vpe_base_irq + i;
+		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+		if (ret)
+			goto err;
+	}

 	return 0;
@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	return -ENOMEM;
 }

+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+	int i;
+
+	if (!has_v4_1())
+		return;
+
+	for (i = 0; i < vm->nr_vpes; i++) {
+		unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+		if (WARN_ON(!irq))
+			continue;
+
+		irq_domain_free_irqs(irq, 16);
+		irq_domain_remove(vm->vpes[i]->sgi_domain);
+		irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+	}
+}
+
 void its_free_vcpu_irqs(struct its_vm *vm)
 {
+	its_free_sgi_irqs(vm);
 	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
 	irq_domain_remove(vm->domain);
 	irq_domain_free_fwnode(vm->fwnode);
@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 	return irq_set_vcpu_affinity(vpe->irq, info);
 }

-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
+{
+	struct irq_desc *desc = irq_to_desc(vpe->irq);
+	struct its_cmd_info info = { };
+	int ret;
+
+	WARN_ON(preemptible());
+
+	info.cmd_type = DESCHEDULE_VPE;
+	if (has_v4_1()) {
+		/* GICv4.1 can directly deal with doorbells */
+		info.req_db = db;
+	} else {
+		/* Undo the nested disable_irq() calls... */
+		while (db && irqd_irq_disabled(&desc->irq_data))
+			enable_irq(vpe->irq);
+	}
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->resident = false;
+
+	return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
 {
-	struct its_cmd_info info;
+	struct its_cmd_info info = { };
 	int ret;

 	WARN_ON(preemptible());

-	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+	info.cmd_type = SCHEDULE_VPE;
+	if (has_v4_1()) {
+		info.g0en = g0en;
+		info.g1en = g1en;
+	} else {
+		/* Disabled the doorbell, as we're about to enter the guest */
+		disable_irq_nosync(vpe->irq);
+	}

 	ret = its_send_vpe_cmd(vpe, &info);
 	if (!ret)
-		vpe->resident = on;
+		vpe->resident = true;

 	return ret;
 }
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 	return irq_set_vcpu_affinity(irq, &info);
 }

-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+	struct its_cmd_info info = {
+		.cmd_type = PROP_UPDATE_VSGI,
+		{
+			.priority	= priority,
+			.group		= group,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops)
 {
 	if (domain) {
 		pr_info("ITS: Enabling GICv4 support\n");
 		gic_domain = domain;
-		vpe_domain_ops = ops;
+		vpe_domain_ops = vpe_ops;
+		sgi_domain_ops = sgi_ops;
 		return 0;
 	}
......
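
The old its_schedule_vpe(vpe, on) bool-flag API splits here into two explicitly-named entry points. A hedged sketch of a load path under the new API — it mirrors what KVM's vgic_v4_load() does later in this merge; example_vcpu_load() and the gic_enabled flag are made-up names, not part of the patch:

	/* Must run in a non-preemptible context, like the real callers */
	static int example_vcpu_load(struct its_vpe *vpe, bool gic_enabled)
	{
		int err;

		/* Step 1: route the vPE's doorbell/commands to this CPU */
		err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
		if (err)
			return err;

		/*
		 * Step 2: make the vPE resident. On GICv4.1 the group
		 * enables land in GICR_VPENDBASER; a GICv4.0 backend
		 * ignores them and fiddles with the doorbell instead.
		 */
		return its_make_vpe_resident(vpe, false, gic_enabled);
	}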
@@ -70,6 +70,7 @@ struct vgic_global {
 	/* Hardware has GICv4? */
 	bool			has_gicv4;
+	bool			has_gicv4_1;

 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;
@@ -230,6 +231,9 @@ struct vgic_dist {
 	/* distributor enabled */
 	bool			enabled;

+	/* Wants SGIs without active state */
+	bool			nassgireq;
+
 	struct vgic_irq		*spis;

 	struct vgic_io_device	dist_iodev;
......
@@ -32,6 +32,8 @@ struct gic_kvm_info {
 	struct resource vctrl;
 	/* vlpi support */
 	bool		has_v4;
+	/* rvpeid support */
+	bool		has_v4_1;
 };

 const struct gic_kvm_info *gic_get_kvm_info(void);
......
@@ -57,6 +57,7 @@
 #define GICD_SPENDSGIR			0x0F20

 #define GICD_CTLR_RWP			(1U << 31)
+#define GICD_CTLR_nASSGIreq		(1U << 8)
 #define GICD_CTLR_DS			(1U << 6)
 #define GICD_CTLR_ARE_NS		(1U << 4)
 #define GICD_CTLR_ENABLE_G1A		(1U << 1)
@@ -90,6 +91,7 @@
 #define GICD_TYPER_ESPIS(typer)						\
 	(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)

+#define GICD_TYPER2_nASSGIcap		(1U << 8)
 #define GICD_TYPER2_VIL			(1U << 7)
 #define GICD_TYPER2_VID			GENMASK(4, 0)
@@ -343,6 +345,15 @@
 #define GICR_VPENDBASER_4_1_VGRP1EN	(1ULL << 58)
 #define GICR_VPENDBASER_4_1_VPEID	GENMASK_ULL(15, 0)

+#define GICR_VSGIR			0x0080
+
+#define GICR_VSGIR_VPEID		GENMASK(15, 0)
+
+#define GICR_VSGIPENDR			0x0088
+
+#define GICR_VSGIPENDR_BUSY		(1U << 31)
+#define GICR_VSGIPENDR_PENDING		GENMASK(15, 0)
+
 /*
  * ITS registers, offsets from ITS_base
  */
@@ -366,6 +377,11 @@
 #define GITS_TRANSLATER			0x10040

+#define GITS_SGIR			0x20020
+
+#define GITS_SGIR_VPEID			GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID		GENMASK_ULL(3, 0)
+
 #define GITS_CTLR_ENABLE		(1U << 0)
 #define GITS_CTLR_ImDe			(1U << 1)
 #define GITS_CTLR_ITS_NUMBER_SHIFT	4
@@ -500,8 +516,9 @@
 #define GITS_CMD_VMAPTI			GITS_CMD_GICv4(GITS_CMD_MAPTI)
 #define GITS_CMD_VMOVI			GITS_CMD_GICv4(GITS_CMD_MOVI)
 #define GITS_CMD_VSYNC			GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */
 #define GITS_CMD_VMOVP			GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI			GITS_CMD_GICv4(3)
 #define GITS_CMD_INVDB			GITS_CMD_GICv4(0xe)

 /*
@@ -650,6 +667,7 @@
 struct rdists {
 	struct {
+		raw_spinlock_t	rd_lock;
 		void __iomem	*rd_base;
 		struct page	*pend_page;
 		phys_addr_t	phys_base;
......
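
As a worked example of the new register interfaces: injecting vSGI 3 into vPE 42 is a single 64-bit write to GITS_SGIR, and reading pending state back is a select-then-poll sequence on the redistributor. A sketch under assumptions — sgir_base and rd_base stand in for mappings of the relevant ITS and redistributor frames, which the (collapsed) ITS driver tracks internally:

	u64 val = FIELD_PREP(GITS_SGIR_VPEID, 42) |
		  FIELD_PREP(GITS_SGIR_VINTID, 3);
	u32 status;

	writeq_relaxed(val, sgir_base + GITS_SGIR);	/* inject */

	/* Read back: latch the vPE ID, poll until the RD has sampled it */
	writel_relaxed(FIELD_PREP(GICR_VSGIR_VPEID, 42), rd_base + GICR_VSGIR);
	do {
		status = readl_relaxed(rd_base + GICR_VSGIPENDR);
	} while (status & GICR_VSGIPENDR_BUSY);

	/* Low 16 bits: one pending bit per SGI */
	status &= GICR_VSGIPENDR_PENDING;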
@@ -49,10 +49,22 @@ struct its_vpe {
 		};
 		/* GICv4.1 implementations */
 		struct {
+			struct fwnode_handle	*fwnode;
+			struct irq_domain	*sgi_domain;
+			struct {
+				u8	priority;
+				bool	enabled;
+				bool	group;
+			}			sgi_config[16];
 			atomic_t vmapp_count;
 		};
 	};

+	/*
+	 * Ensures mutual exclusion between affinity setting of the
+	 * vPE and vLPI operations using vpe->col_idx.
+	 */
+	raw_spinlock_t		vpe_lock;
 	/*
 	 * This collection ID is used to indirect the target
 	 * redistributor for this VPE. The ID itself isn't involved in
@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
 	SCHEDULE_VPE,
 	DESCHEDULE_VPE,
 	INVALL_VPE,
+	PROP_UPDATE_VSGI,
 };

 struct its_cmd_info {
@@ -105,19 +118,27 @@ struct its_cmd_info {
 			bool g0en;
 			bool g1en;
 		};
+		struct {
+			u8 priority;
+			bool group;
+		};
 	};
 };

 int its_alloc_vcpu_irqs(struct its_vm *vm);
 void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
 int its_invall_vpe(struct its_vpe *vpe);
 int its_map_vlpi(int irq, struct its_vlpi_map *map);
 int its_get_vlpi(int irq, struct its_vlpi_map *map);
 int its_unmap_vlpi(int irq);
 int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);

 struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops);

 #endif
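
Note the capability plumbing in the new its_init_v4() signature: sgi_domain_ops doubles as the GICv4.1 flag (see has_v4_1() in irq-gic-v4.c above), so a v4.0-only caller simply passes NULL. The real call sits in the collapsed ITS diff; a sketch with placeholder *_ops names, not the actual driver symbols:

	return its_init_v4(parent_domain, &my_vpe_domain_ops,
			   rdists->has_rvpeid ? &my_sgi_domain_ops : NULL);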
@@ -625,6 +625,14 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
 			kvm_update_stolen_time(vcpu);

+		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+			/* The distributor enable bits were changed */
+			preempt_disable();
+			vgic_v4_put(vcpu, false);
+			vgic_v4_load(vcpu);
+			preempt_enable();
+		}
 	}
 }
......
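
KVM_REQ_RELOAD_GICv4 is raised by the GICD_CTLR trap handler (in the vgic-mmio-v3.c hunks below) whenever the guest flips the distributor enable on a GICv4.1 host; the put/load round-trip above then re-latches the group enables into GICR_VPENDBASER, which is why it runs with preemption disabled. The producer side is a single broadcast:

	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);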
@@ -178,6 +178,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 			    struct kvm_vcpu *vcpu)
 {
 	char *type;
+	bool pending;
+
 	if (irq->intid < VGIC_NR_SGIS)
 		type = "SGI";
 	else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
@@ -190,6 +192,16 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 	if (irq->intid ==0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
 		print_header(s, irq, vcpu);

+	pending = irq->pending_latch;
+	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+		int err;
+
+		err = irq_get_irqchip_state(irq->host_irq,
+					    IRQCHIP_STATE_PENDING,
+					    &pending);
+		WARN_ON_ONCE(err);
+	}
+
 	seq_printf(s, "       %s %4d "
 		      "    %2d "
 		      "%d%d%d%d%d%d%d "
@@ -201,7 +213,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 		      "\n",
 			type, irq->intid,
 			(irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
-			irq->pending_latch,
+			pending,
 			irq->line_level,
 			irq->active,
 			irq->enabled,
......
@@ -3,9 +3,11 @@
  * VGICv3 MMIO handling functions
  */

+#include <linux/bitfield.h>
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/interrupt.h>
 #include <kvm/iodev.h>
 #include <kvm/arm_vgic.h>
@@ -69,6 +71,8 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 		if (vgic->enabled)
 			value |= GICD_CTLR_ENABLE_SS_G1;
 		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+		if (vgic->nassgireq)
+			value |= GICD_CTLR_nASSGIreq;
 		break;
 	case GICD_TYPER:
 		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
@@ -80,6 +84,10 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
 		}
 		break;
+	case GICD_TYPER2:
+		if (kvm_vgic_global_state.has_gicv4_1)
+			value = GICD_TYPER2_nASSGIcap;
+		break;
 	case GICD_IIDR:
 		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
 			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
@@ -97,17 +105,46 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 				    unsigned long val)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	bool was_enabled = dist->enabled;

 	switch (addr & 0x0c) {
-	case GICD_CTLR:
+	case GICD_CTLR: {
+		bool was_enabled, is_hwsgi;
+
+		mutex_lock(&vcpu->kvm->lock);
+
+		was_enabled = dist->enabled;
+		is_hwsgi = dist->nassgireq;
+
 		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

+		/* Not a GICv4.1? No HW SGIs */
+		if (!kvm_vgic_global_state.has_gicv4_1)
+			val &= ~GICD_CTLR_nASSGIreq;
+
+		/* Dist stays enabled? nASSGIreq is RO */
+		if (was_enabled && dist->enabled) {
+			val &= ~GICD_CTLR_nASSGIreq;
+			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
+		}
+
+		/* Switching HW SGIs? */
+		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+		if (is_hwsgi != dist->nassgireq)
+			vgic_v4_configure_vsgis(vcpu->kvm);

-		if (!was_enabled && dist->enabled)
+		if (kvm_vgic_global_state.has_gicv4_1 &&
+		    was_enabled != dist->enabled)
+			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
+		else if (!was_enabled && dist->enabled)
 			vgic_kick_vcpus(vcpu->kvm);
+
+		mutex_unlock(&vcpu->kvm->lock);
 		break;
+	}
 	case GICD_TYPER:
+	case GICD_TYPER2:
 	case GICD_IIDR:
+		/* This is at best for documentation purposes... */
 		return;
 	}
 }
@@ -116,10 +153,22 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
 					   gpa_t addr, unsigned int len,
 					   unsigned long val)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	switch (addr & 0x0c) {
+	case GICD_TYPER2:
 	case GICD_IIDR:
 		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
 			return -EINVAL;
+		return 0;
+	case GICD_CTLR:
+		/* Not a GICv4.1? No HW SGIs */
+		if (!kvm_vgic_global_state.has_gicv4_1)
+			val &= ~GICD_CTLR_nASSGIreq;
+
+		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+		return 0;
 	}

 	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
@@ -257,8 +306,18 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
 	 */
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		bool state = irq->pending_latch;

-		if (irq->pending_latch)
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			int err;
+
+			err = irq_get_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    &state);
+			WARN_ON(err);
+		}
+
+		if (state)
 			value |= (1U << i);

 		vgic_put_irq(vcpu->kvm, irq);
@@ -942,8 +1001,18 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 		 * generate interrupts of either group.
 		 */
 		if (!irq->group || allow_group1) {
-			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+			if (!irq->hw) {
+				irq->pending_latch = true;
+				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+			} else {
+				/* HW SGI? Ask the GIC to inject it */
+				int err;
+				err = irq_set_irqchip_state(irq->host_irq,
+							    IRQCHIP_STATE_PENDING,
+							    true);
+				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			}
 		} else {
 			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
......
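
The GICD_CTLR write rules above (nASSGIreq is ignored without GICv4.1, and read-only while the distributor stays enabled) are compact but easy to misread. The same decision folded into a hypothetical pure helper, for illustration only — not part of the patch:

	static bool effective_nassgireq(bool has_v4_1, bool was_enabled,
					bool now_enabled, bool old_nassgireq,
					bool written_nassgireq)
	{
		/* Not a GICv4.1? No HW SGIs, ever */
		if (!has_v4_1)
			return false;

		/* Distributor stays enabled? The bit is RO */
		if (was_enabled && now_enabled)
			return old_nassgireq;

		return written_nassgireq;
	}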
@@ -5,6 +5,8 @@

 #include <linux/bitops.h>
 #include <linux/bsearch.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <kvm/iodev.h>
@@ -59,6 +61,11 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
 	return value;
 }

+static void vgic_update_vsgi(struct vgic_irq *irq)
+{
+	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
+}
+
 void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 			   unsigned int len, unsigned long val)
 {
@@ -71,7 +78,12 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->group = !!(val & BIT(i));
-		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			vgic_update_vsgi(irq);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		} else {
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		}

 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -113,7 +125,21 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (vgic_irq_is_mapped_level(irq)) {
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			if (!irq->enabled) {
+				struct irq_data *data;
+
+				irq->enabled = true;
+				data = &irq_to_desc(irq->host_irq)->irq_data;
+				while (irqd_irq_disabled(data))
+					enable_irq(irq->host_irq);
+			}
+
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			vgic_put_irq(vcpu->kvm, irq);
+
+			continue;
+		} else if (vgic_irq_is_mapped_level(irq)) {
 			bool was_high = irq->line_level;

 			/*
@@ -148,6 +174,8 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
+			disable_irq_nosync(irq->host_irq);

 		irq->enabled = false;
@@ -167,10 +195,22 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 		unsigned long flags;
+		bool val;

 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (irq_is_pending(irq))
-			value |= (1U << i);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			int err;
+
+			val = false;
+			err = irq_get_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    &val);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+		} else {
+			val = irq_is_pending(irq);
+		}
+
+		value |= ((u32)val << i);
 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 		vgic_put_irq(vcpu->kvm, irq);
@@ -215,6 +255,21 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 		}

 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			/* HW SGI? Ask the GIC to inject it */
+			int err;
+			err = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    true);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			vgic_put_irq(vcpu->kvm, irq);
+
+			continue;
+		}
+
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
 		else
@@ -269,6 +324,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);

+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			/* HW SGI? Ask the GIC to clear its pending bit */
+			int err;
+			err = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    false);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			vgic_put_irq(vcpu->kvm, irq);
+
+			continue;
+		}
+
 		if (irq->hw)
 			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
 		else
@@ -318,8 +387,15 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);

-	if (irq->hw) {
+	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+		/*
+		 * GICv4.1 VSGI feature doesn't track an active state,
+		 * so let's not kid ourselves, there is nothing we can
+		 * do here.
+		 */
+		irq->active = false;
 	} else {
 		u32 model = vcpu->kvm->arch.vgic.vgic_model;
 		u8 active_source;
@@ -493,6 +569,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		/* Narrow the priority range to what we actually support */
 		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid))
+			vgic_update_vsgi(irq);
 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 		vgic_put_irq(vcpu->kvm, irq);
......
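
One pattern repeats through all of these handlers: once an SGI is forwarded (irq->hw set), its pending bit lives in the GIC, not in pending_latch, so every read must go through irq_get_irqchip_state(). Folded into a hypothetical helper, for illustration only:

	/* What "is this interrupt pending?" reduces to after this series */
	static bool example_irq_pending(struct vgic_irq *irq)
	{
		bool val = false;

		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			WARN_RATELIMIT(irq_get_irqchip_state(irq->host_irq,
							     IRQCHIP_STATE_PENDING,
							     &val),
				       "IRQ %d", irq->host_irq);
		else
			val = irq_is_pending(irq);

		return val;
	}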
@@ -540,6 +540,8 @@ int vgic_v3_map_resources(struct kvm *kvm)
 		goto out;
 	}

+	if (kvm_vgic_global_state.has_gicv4_1)
+		vgic_v4_configure_vsgis(kvm);
 	dist->ready = true;

 out:
@@ -595,7 +597,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	/* GICv4 support? */
 	if (info->has_v4) {
 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
-		kvm_info("GICv4 support %sabled\n",
+		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
+		kvm_info("GICv4%s support %sabled\n",
+			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
 			 gicv4_enable ? "en" : "dis");
 	}
......
@@ -67,10 +67,10 @@
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue a irq_set_affinity() on the
- * irq corresponding to the vcpu, then call its_schedule_vpe(). You
- * must be in a non-preemptible context. On exit, another call to
- * its_schedule_vpe() tells the redistributor that we're done with the
- * vcpu.
+ * irq corresponding to the vcpu, then call its_make_vpe_resident().
+ * You must be in a non-preemptible context. On exit, a call to
+ * its_make_vpe_non_resident() tells the redistributor that we're done
+ * with the vcpu.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	struct kvm_vcpu *vcpu = info;

 	/* We got the message, no need to fire again */
-	if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+	if (!kvm_vgic_global_state.has_gicv4_1 &&
+	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);

 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@@ -96,6 +97,104 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	return IRQ_HANDLED;
 }

+static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
+{
+	vpe->sgi_config[irq->intid].enabled	= irq->enabled;
+	vpe->sgi_config[irq->intid].group	= irq->group;
+	vpe->sgi_config[irq->intid].priority	= irq->priority;
+}
+
+static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
+{
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	int i;
+
+	/*
+	 * With GICv4.1, every virtual SGI can be directly injected. So
+	 * let's pretend that they are HW interrupts, tied to a host
+	 * IRQ. The SGI code will do its magic.
+	 */
+	for (i = 0; i < VGIC_NR_SGIS; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+		struct irq_desc *desc;
+		unsigned long flags;
+		int ret;
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+		if (irq->hw)
+			goto unlock;
+
+		irq->hw = true;
+		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);
+
+		/* Transfer the full irq state to the vPE */
+		vgic_v4_sync_sgi_config(vpe, irq);
+		desc = irq_to_desc(irq->host_irq);
+		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
+					      false);
+		if (!WARN_ON(ret)) {
+			/* Transfer pending state */
+			ret = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    irq->pending_latch);
+			WARN_ON(ret);
+			irq->pending_latch = false;
+		}
+	unlock:
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+}
+
+static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for (i = 0; i < VGIC_NR_SGIS; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+		struct irq_desc *desc;
+		unsigned long flags;
+		int ret;
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+		if (!irq->hw)
+			goto unlock;
+
+		irq->hw = false;
+		ret = irq_get_irqchip_state(irq->host_irq,
+					    IRQCHIP_STATE_PENDING,
+					    &irq->pending_latch);
+		WARN_ON(ret);
+
+		desc = irq_to_desc(irq->host_irq);
+		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+	unlock:
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+}
+
+/* Must be called with the kvm lock held */
+void vgic_v4_configure_vsgis(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_arm_halt_guest(kvm);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (dist->nassgireq)
+			vgic_v4_enable_vsgis(vcpu);
+		else
+			vgic_v4_disable_vsgis(vcpu);
+	}
+
+	kvm_arm_resume_guest(kvm);
+}
+
 /**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm:	Pointer to the VM being initialized
@@ -140,6 +239,7 @@ int vgic_v4_init(struct kvm *kvm)
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		int irq = dist->its_vm.vpes[i]->irq;
+		unsigned long irq_flags = DB_IRQ_FLAGS;

 		/*
 		 * Don't automatically enable the doorbell, as we're
@@ -147,8 +247,14 @@ int vgic_v4_init(struct kvm *kvm)
 		 * blocked. Also disable the lazy disabling, as the
 		 * doorbell could kick us out of the guest too
 		 * early...
+		 *
+		 * On GICv4.1, the doorbell is managed in HW and must
+		 * be left enabled.
 		 */
-		irq_set_status_flags(irq, DB_IRQ_FLAGS);
+		if (kvm_vgic_global_state.has_gicv4_1)
+			irq_flags &= ~IRQ_NOAUTOEN;
+		irq_set_status_flags(irq, irq_flags);
+
 		ret = request_irq(irq, vgic_v4_doorbell_handler,
 				  0, "vcpu", vcpu);
 		if (ret) {
@@ -199,19 +305,11 @@ void vgic_v4_teardown(struct kvm *kvm)
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-	struct irq_desc *desc = irq_to_desc(vpe->irq);

 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;

-	/*
-	 * If blocking, a doorbell is required. Undo the nested
-	 * disable_irq() calls...
-	 */
-	while (need_db && irqd_irq_disabled(&desc->irq_data))
-		enable_irq(vpe->irq);
-
-	return its_schedule_vpe(vpe, false);
+	return its_make_vpe_non_resident(vpe, need_db);
 }

 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -232,18 +330,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;

-	/* Disabled the doorbell, as we're about to enter the guest */
-	disable_irq_nosync(vpe->irq);
-
-	err = its_schedule_vpe(vpe, true);
+	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
 	if (err)
 		return err;

 	/*
 	 * Now that the VPE is resident, let's get rid of a potential
-	 * doorbell interrupt that would still be pending.
+	 * doorbell interrupt that would still be pending. This is a
+	 * GICv4.0 only "feature"...
 	 */
-	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+
+	return err;
 }

 static struct vgic_its *vgic_get_its(struct kvm *kvm,
......
@@ -316,5 +316,6 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
 bool vgic_supports_direct_msis(struct kvm *kvm);
 int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
+void vgic_v4_configure_vsgis(struct kvm *kvm);

 #endif