Commit fc3790fa authored by Paolo Bonzini


Merge tag 'kvm-arm-gicv4-for-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

GICv4 Support for KVM/ARM for v4.15
parents cf9b0772 95b110ab
@@ -1890,6 +1890,10 @@
[KVM,ARM] Trap guest accesses to GICv3 common
system registers
kvm-arm.vgic_v4_enable=
[KVM,ARM] Allow use of GICv4 for direct injection of
LPIs.
kvm-intel.ept= [KVM,Intel] Disable extended page tables
(virtualized MMU) support on capable Intel chips.
Default is 1 (enabled)
...
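For anyone trying this out: the new option is a host kernel command-line parameter, so a typical test setup (illustrative; it obviously also requires GICv4-capable hardware with an ITS) would boot the host with:

kvm-arm.vgic_v4_enable=1

Direct injection stays disabled by default; without the option the vgic keeps using the existing software LPI injection path.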
@@ -64,6 +64,8 @@ Groups:
-EINVAL: Inconsistent restored data
-EFAULT: Invalid guest ram access
-EBUSY: One or more VCPUS are running
-EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
state is not available
KVM_DEV_ARM_VGIC_GRP_ITS_REGS
Attributes:
...
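To make the new error code concrete, here is a rough userspace-side sketch of how it would surface when saving the ITS tables through the device attribute API. The choice of group/attribute (a table save via KVM_DEV_ARM_VGIC_GRP_CTRL) and the helper name are illustrative assumptions, not something this patch adds:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in the arm64 asm/kvm.h definitions */

/* its_fd: device fd from KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_ITS */
static int example_save_its_tables(int its_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
	};

	if (ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr) < 0) {
		if (errno == EACCES)
			/* vITS backed by a physical GICv4 ITS: state not retrievable */
			fprintf(stderr, "ITS state unavailable: %s\n", strerror(errno));
		return -errno;
	}
	return 0;
}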
@@ -4,6 +4,7 @@
#
source "virt/kvm/Kconfig"
source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -23,6 +24,8 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select ARM_GIC
select ARM_GIC_V3
select ARM_GIC_V3_ITS
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
@@ -36,6 +39,8 @@ config KVM
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
...
@@ -32,6 +32,7 @@ obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-v3.o
obj-y += $(KVM)/arm/vgic/vgic-v4.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
...
@@ -4,6 +4,7 @@
#
source "virt/kvm/Kconfig"
source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -36,6 +37,8 @@ config KVM
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
...
@@ -27,6 +27,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
...
@@ -26,6 +26,8 @@
#include <linux/list.h>
#include <linux/jump_label.h>
#include <linux/irqchip/arm-gic-v4.h>
#define VGIC_V3_MAX_CPUS 255
#define VGIC_V2_MAX_CPUS 8
#define VGIC_NR_IRQS_LEGACY 256
@@ -73,6 +75,9 @@ struct vgic_global {
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
bool can_emulate_gicv2;
/* Hardware has GICv4? */
bool has_gicv4;
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
@@ -116,6 +121,7 @@ struct vgic_irq {
bool hw; /* Tied to HW IRQ */
struct kref refcount; /* Used for LPIs */
u32 hwintid; /* HW INTID number */
unsigned int host_irq; /* linux irq corresponding to hwintid */
union {
u8 targets; /* GICv2 target VCPUs mask */
u32 mpidr; /* GICv3 target VCPU */
@@ -232,6 +238,15 @@ struct vgic_dist {
/* used by vgic-debug */
struct vgic_state_iter *iter;
/*
* GICv4 ITS per-VM data, containing the IRQ domain, the VPE
* array, the property table pointer as well as allocation
* data. This essentially ties the Linux IRQ core and ITS
* together, and avoids leaking KVM's data structures anywhere
* else.
*/
struct its_vm its_vm;
};
struct vgic_v2_cpu_if {
@@ -250,6 +265,14 @@ struct vgic_v3_cpu_if {
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
/*
* GICv4 ITS per-VPE data, containing the doorbell IRQ, the
* pending table pointer, the its_vm pointer and a few other
* HW specific things. As for the its_vm structure, this is
* linking the Linux IRQ subsystem and the ITS together.
*/
struct its_vpe its_vpe;
};
struct vgic_cpu {
@@ -307,9 +330,10 @@ void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
@@ -349,4 +373,15 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
struct kvm_kernel_irq_routing_entry;
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
#endif /* __KVM_ARM_VGIC_H */
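For context on where the host IRQ passed to kvm_vgic_v4_set_forwarding() comes from: the irq bypass framework (selected in the Kconfig hunks above) matches a producer registered by a device driver (VFIO in practice) against KVM's irqfd consumer, and prod->irq is the Linux IRQ of the physical MSI. A minimal, hedged sketch of the producer side, with hypothetical names:

#include <linux/eventfd.h>
#include <linux/irqbypass.h>

/*
 * Hypothetical driver-side sketch: advertise a physical MSI (host_irq) as a
 * bypass producer, using the guest-facing eventfd context as the token so it
 * can be matched against the irqfd consumer. Mirrors what VFIO does, but the
 * names here are illustrative only.
 */
static int example_register_msi_producer(struct irq_bypass_producer *prod,
					 struct eventfd_ctx *trigger,
					 int host_irq)
{
	prod->token = trigger;	/* must equal the consumer's token (the irqfd's eventfd) */
	prod->irq = host_irq;	/* ends up as prod->irq in kvm_arch_irq_bypass_add_producer() */

	return irq_bypass_register_producer(prod);
}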
@@ -817,9 +817,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct irq_desc *desc;
struct irq_data *data;
int phys_irq;
int ret;
if (timer->enabled)
@@ -837,26 +834,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
/*
* Find the physical IRQ number corresponding to the host_vtimer_irq
*/
desc = irq_to_desc(host_vtimer_irq);
if (!desc) {
kvm_err("%s: no interrupt descriptor\n", __func__);
return -EINVAL;
}
data = irq_desc_get_irq_data(desc);
while (data->parent_data)
data = data->parent_data;
phys_irq = data->hwirq;
/*
* Tell the VGIC that the virtual interrupt is tied to a
* physical interrupt. We do that once per VCPU.
*/
ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
if (ret)
return ret;
...
@@ -27,6 +27,8 @@
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
@@ -175,6 +177,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
kvm_vgic_destroy(kvm);
free_percpu(kvm->arch.last_vcpu_ran);
kvm->arch.last_vcpu_ran = NULL;
@@ -184,8 +188,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm->vcpus[i] = NULL;
}
}
kvm_vgic_destroy(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -313,11 +315,13 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_timer_schedule(vcpu);
kvm_vgic_v4_enable_doorbell(vcpu);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_timer_unschedule(vcpu);
kvm_vgic_v4_disable_doorbell(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -1450,6 +1454,46 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
return NULL;
}
bool kvm_arch_has_irq_bypass(void)
{
return true;
}
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
&irqfd->irq_entry);
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
&irqfd->irq_entry);
}
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
kvm_arm_halt_guest(irqfd->kvm);
}
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
kvm_arm_resume_guest(irqfd->kvm);
}
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
...
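These arch hooks are not called directly by this series; the generic irqfd code registers an irq bypass consumer that points at them. Roughly (a paraphrased sketch of virt/kvm/eventfd.c, not part of this diff), kvm_irqfd_assign() does the equivalent of the following, which is how a matched producer/consumer pair ends up in kvm_vgic_v4_set_forwarding():

#include <linux/kvm_host.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>

/* Paraphrased from the generic irqfd code; shown only to illustrate how the
 * kvm_arch_irq_bypass_*() hooks above get wired up. */
static void example_wire_up_bypass_consumer(struct kvm_kernel_irqfd *irqfd)
{
	if (!kvm_arch_has_irq_bypass())
		return;

	irqfd->consumer.token = (void *)irqfd->eventfd;
	irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
	irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
	irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
	irqfd->consumer.start = kvm_arch_irq_bypass_start;

	if (irq_bypass_register_consumer(&irqfd->consumer))
		pr_info("irq bypass consumer (token %p) registration fails\n",
			irqfd->consumer.token);
}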
@@ -258,7 +258,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
} else {
if (static_branch_unlikely(&vgic_v3_cpuif_trap))
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
cpu_if->its_vpe.its_vm)
write_gicreg(0, ICH_HCR_EL2);
cpu_if->vgic_elrsr = 0xffff;
@@ -337,9 +338,11 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
/*
* If we need to trap system registers, we must write
* ICH_HCR_EL2 anyway, even if no interrupts are being
* injected,
* injected. Same thing if GICv4 is used, as VLPI
* delivery is gated by ICH_HCR_EL2.En.
*/
if (static_branch_unlikely(&vgic_v3_cpuif_trap))
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
cpu_if->its_vpe.its_vm)
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
...
@@ -285,6 +285,10 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
ret = vgic_v4_init(kvm);
if (ret)
goto out;
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
@@ -320,6 +324,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
kfree(dist->spis);
dist->nr_spis = 0;
if (vgic_supports_direct_msis(kvm))
vgic_v4_teardown(kvm);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
...
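The vgic-v4.c implementation itself is in one of the collapsed diffs, so the following is only a rough, from-memory outline of what a per-VM GICv4 init has to do; treat the names and details as assumptions, not as the patch. The idea is to tie each VCPU's its_vpe into the VM's its_vm, ask the GICv4 layer for the VPE doorbell interrupts, and hook them up:

#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v4.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <kvm/arm_vgic.h>

/* Hypothetical doorbell handler: a firing doorbell means a VLPI became
 * pending while the VPE was not resident, so kick the corresponding VCPU. */
static irqreturn_t example_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	kvm_vcpu_kick(vcpu);
	return IRQ_HANDLED;
}

/* Hedged outline of per-VM GICv4 init; illustrative only. */
static int example_vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, nr_vcpus, ret;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL);
	if (!dist->its_vm.vpes)
		return -ENOMEM;
	dist->its_vm.nr_vpes = nr_vcpus;

	/* One VPE per VCPU, all grouped under the VM's its_vm */
	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/* Let the GICv4 layer allocate the VPE table and doorbell LPIs */
	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0)
		return ret;

	/* Request each doorbell interrupt (->irq being the doorbell Linux IRQ) */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = request_irq(dist->its_vm.vpes[i]->irq,
				  example_v4_doorbell_handler, 0,
				  "vcpu doorbell", vcpu);
		if (ret)
			return ret;
	}
	return 0;
}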
This diff is collapsed.
@@ -54,6 +54,11 @@ bool vgic_has_its(struct kvm *kvm)
return dist->has_its;
}
bool vgic_supports_direct_msis(struct kvm *kvm)
{
return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
...
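Callers gate the direct-injection fast path on this predicate. As a minimal sketch of how the forwarding entry point declared in arm_vgic.h would typically begin (the body here is illustrative; the real vgic-v4.c code is in one of the collapsed diffs):

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	/*
	 * No GICv4, or not enabled on the command line: return success so
	 * that injection quietly falls back to the normal software LPI path.
	 */
	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * ... otherwise translate the routing entry's MSI into a vLPI via
	 * vgic_msi_to_its()/vgic_its_resolve_lpi() and map virq onto it ...
	 */
	return 0;
}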
@@ -24,6 +24,7 @@
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
@@ -461,6 +462,12 @@ static int __init early_common_trap_cfg(char *buf)
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
static int __init early_gicv4_enable(char *buf)
{
return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
/**
* vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -480,6 +487,13 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.can_emulate_gicv2 = false;
kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
/* GICv4 support? */
if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_info("GICv4 support %sabled\n",
gicv4_enable ? "en" : "dis");
}
if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
kvm_vgic_global_state.vcpu_base = 0;
...
This diff is collapsed.
@@ -17,6 +17,8 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include "vgic.h" #include "vgic.h"
...@@ -409,25 +411,56 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, ...@@ -409,25 +411,56 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
return 0; return 0;
} }
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) /* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned int host_irq)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
struct irq_desc *desc;
struct irq_data *data;
/*
* Find the physical IRQ number corresponding to @host_irq
*/
desc = irq_to_desc(host_irq);
if (!desc) {
kvm_err("%s: no interrupt descriptor\n", __func__);
return -EINVAL;
}
data = irq_desc_get_irq_data(desc);
while (data->parent_data)
data = data->parent_data;
irq->hw = true;
irq->host_irq = host_irq;
irq->hwintid = data->hwirq;
return 0;
}
/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
irq->hw = false;
irq->hwintid = 0;
}
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
int ret;
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
irq->hw = true;
irq->hwintid = phys_irq;
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return 0;
return ret;
}
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq;
unsigned long flags;
@@ -435,14 +468,11 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
kvm_vgic_unmap_irq(irq);
irq->hw = false;
irq->hwintid = 0;
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -688,6 +718,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
WARN_ON(vgic_v4_sync_hwstate(vcpu));
/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
@@ -700,6 +732,8 @@
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
WARN_ON(vgic_v4_flush_hwstate(vcpu));
/*
* If there are no virtual interrupts active or pending for this
* VCPU, then there is no work to do and we can bail out without
@@ -751,6 +785,9 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
if (!vcpu->kvm->arch.vgic.enabled)
return false;
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
return true;
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
@@ -784,9 +821,9 @@ void vgic_kick_vcpus(struct kvm *kvm)
}
}
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
bool map_is_active;
unsigned long flags;
...
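For orientation, the two WARN_ON'd hooks above bracket guest entry; a simplified, from-memory sketch of one run-loop iteration in virt/kvm/arm/arm.c (not part of this diff; locking and IRQ-masking details are elided):

#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_asm.h>

/* Illustrative fragment only: where the v4 flush/sync hooks get exercised. */
static int example_run_once(struct kvm_vcpu *vcpu)
{
	int ret;

	kvm_vgic_flush_hwstate(vcpu);	/* GICv4: VPE made resident before entry */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);	/* world switch into the guest */
	kvm_vgic_sync_hwstate(vcpu);	/* GICv4: VPE descheduled after exit */

	return ret;
}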
@@ -237,4 +237,14 @@ static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
}
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
#endif