Commit 04a7ea04 authored by Paolo Bonzini


Merge tag 'kvmarm-for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/ARM updates for 4.13

- vcpu request overhaul
- allow timer and PMU to have their interrupt number
  selected from userspace
- workaround for Cavium erratum 30115
- handling of memory poisoning
- the usual crop of fixes and cleanups

Conflicts:
	arch/s390/include/asm/kvm_host.h
parents c8533544 d38338e3
@@ -1829,6 +1829,18 @@
			for all guests.
			Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
kvm-arm.vgic_v3_group0_trap=
[KVM,ARM] Trap guest accesses to GICv3 group-0
system registers
kvm-arm.vgic_v3_group1_trap=
[KVM,ARM] Trap guest accesses to GICv3 group-1
system registers
kvm-arm.vgic_v3_common_trap=
[KVM,ARM] Trap guest accesses to GICv3 common
system registers
kvm-intel.ept=	[KVM,Intel] Disable extended page tables
			(virtualized MMU) support on capable Intel chips.
			Default is 1 (enabled)
......
@@ -62,6 +62,7 @@ stable kernels.
| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
| Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
| Cavium         | ThunderX Core   | #30115          | CAVIUM_ERRATUM_30115    |
|                |                 |                 |                         |
| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585     |
|                |                 |                 |                         |
......
@@ -16,7 +16,9 @@ Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a
Returns: -EBUSY: The PMU overflow interrupt is already set
         -ENXIO: The overflow interrupt not set when attempting to get it
         -ENODEV: PMUv3 not supported
         -EINVAL: Invalid PMU overflow interrupt number supplied or
                  trying to set the IRQ number without using an in-kernel
                  irqchip.

A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt

@@ -25,11 +27,36 @@ all vcpus, while as an SPI it must be a separate number per vcpu.

1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
Parameters: no additional parameter in kvm_device_attr.addr
Returns: -ENODEV: PMUv3 not supported or GIC not initialized
         -ENXIO: PMUv3 not properly configured or in-kernel irqchip not
                 configured as required prior to calling this attribute
         -EBUSY: PMUv3 already initialized

Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
virtual GIC implementation, this must be done after initializing the in-kernel
irqchip.
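As a rough illustration (not part of the patch), a VMM could drive these two
attributes through the vcpu device control API roughly as follows; the helper
name and the chosen PPI number are made up for the example:

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: vcpu_fd comes from KVM_CREATE_VCPU, the in-kernel GIC already exists. */
static int vmm_setup_pmu(int vcpu_fd)
{
	int irq = 23;				/* example PPI for the overflow interrupt */
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (__u64)(unsigned long)&irq,
	};

	/* Set the overflow interrupt number first ... */
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/* ... then request initialization of the PMUv3. */
	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
```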
2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
Architectures: ARM, ARM64

2.1. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_VTIMER
2.2. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_PTIMER
Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
            pointer to an int
Returns: -EINVAL: Invalid timer interrupt number
         -EBUSY:  One or more VCPUs have already run

A value describing the architected timer interrupt number when connected to an
in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
attribute overrides the default values (see below).

KVM_ARM_VCPU_TIMER_IRQ_VTIMER: The EL1 virtual timer intid (default: 27)
KVM_ARM_VCPU_TIMER_IRQ_PTIMER: The EL1 physical timer intid (default: 30)

Setting the same PPI for different timers will prevent the VCPUs from running.
Setting the interrupt number on a VCPU configures all VCPUs created at that
time to use the number provided for a given timer, overwriting any previously
configured values on other VCPUs. Userspace should configure the interrupt
numbers on at least one VCPU after creating all VCPUs and before running any
VCPUs.
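For example, a minimal sketch (assuming the same vcpu-fd setup as in the PMU
example above) that overrides the virtual timer PPI before any VCPU has run
might look like this; the intid value is only illustrative:

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: call once after all VCPUs are created and before any of them runs. */
static int vmm_set_vtimer_irq(int vcpu_fd, int intid)
{
	/* intid must be a PPI, i.e. 16 <= intid < 32 (default is 27). */
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_TIMER_CTRL,
		.attr	= KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
		.addr	= (__u64)(unsigned long)&intid,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
```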
@@ -44,7 +44,9 @@
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);

@@ -233,8 +235,6 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);

@@ -291,20 +291,12 @@ static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

#endif /* __ARM_KVM_HOST_H__ */
@@ -203,6 +203,14 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK	0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL	0
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0
#define KVM_ARM_VCPU_PMU_V3_INIT 1
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
#define KVM_DEV_ARM_VGIC_CTRL_INIT	0
#define KVM_DEV_ARM_ITS_SAVE_TABLES	1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES	2
......
@@ -301,3 +301,54 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
{
	return -EINVAL;
}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_get_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_has_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
@@ -72,6 +72,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
		trace_kvm_wfx(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
......
@@ -237,8 +237,10 @@ void __hyp_text __noreturn __hyp_panic(int cause)
		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__banked_restore_state(host_ctxt);
		__sysreg_restore_state(host_ctxt);
	}
......
@@ -104,7 +104,6 @@ __do_hyp_init:
	@  - Write permission implies XN: disabled
	@  - Instruction cache: enabled
	@  - Data/Unified cache: enabled
	@  - Memory alignment checks: enabled
	@  - MMU: enabled (this code must be run from an identity mapping)
	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
	ldr	r2, =HSCTLR_MASK

@@ -112,8 +111,8 @@ __do_hyp_init:
	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
	and	r1, r1, r2
 ARM(	ldr	r2, =(HSCTLR_M)				)
 THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)		)
	orr	r1, r1, r2
	orr	r0, r0, r1
	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
......
@@ -37,16 +37,6 @@ static struct kvm_regs cortexa_regs_reset = {
	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
static const struct kvm_irq_level cortexa_ptimer_irq = {
{ .irq = 30 },
.level = 1,
};
static const struct kvm_irq_level cortexa_vtimer_irq = {
{ .irq = 27 },
.level = 1,
};
/*******************************************************************************
 * Exported reset function

@@ -62,16 +52,12 @@ static const struct kvm_irq_level cortexa_vtimer_irq = {
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_regs *reset_regs;
	const struct kvm_irq_level *cpu_vtimer_irq;
	const struct kvm_irq_level *cpu_ptimer_irq;

	switch (vcpu->arch.target) {
	case KVM_ARM_TARGET_CORTEX_A7:
	case KVM_ARM_TARGET_CORTEX_A15:
		reset_regs = &cortexa_regs_reset;
		vcpu->arch.midr = read_cpuid_id();
		cpu_vtimer_irq = &cortexa_vtimer_irq;
		cpu_ptimer_irq = &cortexa_ptimer_irq;
		break;
	default:
		return -ENODEV;

@@ -84,5 +70,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
	kvm_reset_coprocs(vcpu);

	/* Reset arch_timer context */
	return kvm_timer_vcpu_reset(vcpu);
}
@@ -480,6 +480,17 @@ config CAVIUM_ERRATUM_27456
	  If unsure, say Y.
config CAVIUM_ERRATUM_30115
bool "Cavium erratum 30115: Guest may disable interrupts in host"
default y
help
On ThunderX T88 pass 1.x through 2.2, T81 pass 1.0 through
1.2, and T83 Pass 1.0, KVM guest execution may disable
interrupts in host. Trapping both GICv3 group-0 and group-1
accesses sidesteps the issue.
If unsure, say Y.
config QCOM_FALKOR_ERRATUM_1003
	bool "Falkor E1003: Incorrect translation due to ASID change"
	default y
......
@@ -89,7 +89,7 @@ static inline void gic_write_ctlr(u32 val)
static inline void gic_write_grpen1(u32 val)
{
	write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);
	isb();
}
......
@@ -38,7 +38,8 @@
#define ARM64_WORKAROUND_REPEAT_TLBI		17
#define ARM64_WORKAROUND_QCOM_FALKOR_E1003	18
#define ARM64_WORKAROUND_858921			19
#define ARM64_WORKAROUND_CAVIUM_30115		20

#define ARM64_NCAPS				21

#endif /* __ASM_CPUCAPS_H */
@@ -86,6 +86,7 @@
#define CAVIUM_CPU_PART_THUNDERX	0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2
#define CAVIUM_CPU_PART_THUNDERX_83XX	0x0A3

#define BRCM_CPU_PART_VULCAN		0x516

@@ -96,6 +97,7 @@
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)

#ifndef __ASSEMBLY__
......
@@ -19,6 +19,7 @@
#define __ASM_ESR_H

#include <asm/memory.h>
#include <asm/sysreg.h>

#define ESR_ELx_EC_UNKNOWN	(0x00)
#define ESR_ELx_EC_WFx		(0x01)

@@ -181,6 +182,29 @@
#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
					 ESR_ELx_SYS64_ISS_DIR_READ)
#define esr_sys64_to_sysreg(e) \
sys_reg((((e) & ESR_ELx_SYS64_ISS_OP0_MASK) >> \
ESR_ELx_SYS64_ISS_OP0_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
ESR_ELx_SYS64_ISS_OP1_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
ESR_ELx_SYS64_ISS_CRN_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
ESR_ELx_SYS64_ISS_CRM_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
ESR_ELx_SYS64_ISS_OP2_SHIFT))
#define esr_cp15_to_sysreg(e) \
sys_reg(3, \
(((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
ESR_ELx_SYS64_ISS_OP1_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
ESR_ELx_SYS64_ISS_CRN_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
ESR_ELx_SYS64_ISS_CRM_SHIFT), \
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
ESR_ELx_SYS64_ISS_OP2_SHIFT))
#ifndef __ASSEMBLY__
#include <asm/types.h>
......
@@ -42,7 +42,9 @@

#define KVM_VCPU_MAX_FEATURES 4

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);

@@ -334,8 +336,6 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
......
@@ -127,6 +127,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);
......
@@ -180,14 +180,31 @@

#define SYS_VBAR_EL1			sys_reg(3, 0, 12, 0, 0)
#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2)
#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3)
#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n)
#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0)
#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1)
#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2)
#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3)
#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n)
#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0)
#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1)
#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2)
#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3)
#define SYS_ICC_DIR_EL1			sys_reg(3, 0, 12, 11, 1)
#define SYS_ICC_RPR_EL1			sys_reg(3, 0, 12, 11, 3)
#define SYS_ICC_SGI1R_EL1		sys_reg(3, 0, 12, 11, 5)
#define SYS_ICC_IAR1_EL1		sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1		sys_reg(3, 0, 12, 12, 1)
#define SYS_ICC_HPPIR1_EL1		sys_reg(3, 0, 12, 12, 2)
#define SYS_ICC_BPR1_EL1		sys_reg(3, 0, 12, 12, 3)
#define SYS_ICC_CTLR_EL1		sys_reg(3, 0, 12, 12, 4)
#define SYS_ICC_SRE_EL1			sys_reg(3, 0, 12, 12, 5)
#define SYS_ICC_IGRPEN0_EL1		sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1		sys_reg(3, 0, 12, 12, 7)

#define SYS_CONTEXTIDR_EL1		sys_reg(3, 0, 13, 0, 1)
#define SYS_TPIDR_EL1			sys_reg(3, 0, 13, 0, 4)
@@ -286,6 +303,10 @@
#define SCTLR_ELx_A	(1 << 1)
#define SCTLR_ELx_M	1
#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
(1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
(1 << 29))
#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)
......
@@ -232,6 +232,9 @@ struct kvm_arch_memory_slot {
#define KVM_ARM_VCPU_PMU_V3_CTRL	0
#define   KVM_ARM_VCPU_PMU_V3_IRQ	0
#define   KVM_ARM_VCPU_PMU_V3_INIT	1
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT		24
......
@@ -132,6 +132,27 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
{
/* Cavium ThunderX, T88 pass 1.x - 2.2 */
.desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115,
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 2),
},
{
/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
.desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115,
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
},
{
/* Cavium ThunderX, T83 pass 1.0 */
.desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115,
MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
},
#endif
	{
		.desc = "Mismatched cache line size",
......
@@ -390,6 +390,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;

@@ -407,6 +410,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;

@@ -424,6 +430,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
......
@@ -89,6 +89,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
......
@@ -106,10 +106,13 @@ __do_hyp_init:
	tlbi	alle2
	dsb	sy

	/*
	 * Preserve all the RES1 bits while setting the default flags,
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	 * is allowed to generate unaligned accesses.
	 */
	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
	msr	sctlr_el2, x4
	isb
......
@@ -350,6 +350,20 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
		}
	}
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
exit_code == ARM_EXCEPTION_TRAP &&
(kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
int ret = __vgic_v3_perform_cpuif_access(vcpu);
if (ret == 1) {
__skip_instr(vcpu);
goto again;
}
/* 0 falls through to be handled out of EL2 */
}
	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);

@@ -422,6 +436,7 @@ void __hyp_text __noreturn __hyp_panic(void)
		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
......
@@ -46,16 +46,6 @@ static const struct kvm_regs default_regs_reset32 = {
		COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
};

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

@@ -108,8 +98,6 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_irq_level *cpu_vtimer_irq;
	const struct kvm_irq_level *cpu_ptimer_irq;
	const struct kvm_regs *cpu_reset;

	switch (vcpu->arch.target) {

@@ -122,8 +110,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
			cpu_reset = &default_regs_reset;
		}

		cpu_vtimer_irq = &default_vtimer_irq;
		cpu_ptimer_irq = &default_ptimer_irq;
		break;
	}

@@ -137,5 +123,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
	kvm_pmu_vcpu_reset(vcpu);

	/* Reset timer */
	return kvm_timer_vcpu_reset(vcpu);
}
@@ -56,7 +56,8 @@
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);

@@ -64,6 +65,16 @@ static bool read_from_write_only(struct kvm_vcpu *vcpu,
	return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
{
WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
return false;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels; static u32 cache_levels;
@@ -93,7 +104,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_set_way_flush(vcpu);
	return true;

@@ -135,7 +146,7 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

@@ -773,7 +784,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

@@ -953,7 +964,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
......
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
		 */
		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		val = 0;

@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
		 */
		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;

		p->regval = val;
	}

@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		p->regval = 0;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				     ICC_BPR1_EL1_SHIFT;

@@ -268,36 +268,21 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
	return true;
}
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
	{ SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
	{ SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
};

int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
......
...@@ -1094,7 +1094,7 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, ...@@ -1094,7 +1094,7 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
struct mm_struct *mm; struct mm_struct *mm;
int i; int i;
if (likely(!vcpu->requests)) if (likely(!kvm_request_pending(vcpu)))
return; return;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
......
...@@ -2337,7 +2337,7 @@ static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu) ...@@ -2337,7 +2337,7 @@ static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
int ret = 0; int ret = 0;
int i; int i;
if (!vcpu->requests) if (!kvm_request_pending(vcpu))
return 0; return 0;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
......
@@ -52,8 +52,8 @@
#define KVM_IRQCHIP_NUM_PINS     256

/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG	KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT	KVM_ARCH_REQ(1)

#include <linux/mmu_notifier.h>
......
...@@ -687,7 +687,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -687,7 +687,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
kvmppc_core_check_exceptions(vcpu); kvmppc_core_check_exceptions(vcpu);
if (vcpu->requests) { if (kvm_request_pending(vcpu)) {
/* Exception delivery raised request; start over */ /* Exception delivery raised request; start over */
return 1; return 1;
} }
......
...@@ -55,8 +55,7 @@ EXPORT_SYMBOL_GPL(kvmppc_pr_ops); ...@@ -55,8 +55,7 @@ EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{ {
return !!(v->arch.pending_exceptions) || return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
v->requests;
} }
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
...@@ -108,7 +107,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -108,7 +107,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
*/ */
smp_mb(); smp_mb();
if (vcpu->requests) { if (kvm_request_pending(vcpu)) {
/* Make sure we process requests preemptable */ /* Make sure we process requests preemptable */
local_irq_enable(); local_irq_enable();
trace_kvm_check_requests(vcpu); trace_kvm_check_requests(vcpu);
......
@@ -42,11 +42,11 @@
#define KVM_HALT_POLL_NS_DEFAULT 80000

/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS	KVM_ARCH_REQ(0)
#define KVM_REQ_DISABLE_IBS	KVM_ARCH_REQ(1)
#define KVM_REQ_ICPT_OPEREXC	KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION	KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION	KVM_ARCH_REQ(4)

#define SIGP_CTRL_C		0x80
#define SIGP_CTRL_SCN_MASK	0x3f
......
...@@ -2777,7 +2777,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -2777,7 +2777,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{ {
retry: retry:
kvm_s390_vcpu_request_handled(vcpu); kvm_s390_vcpu_request_handled(vcpu);
if (!vcpu->requests) if (!kvm_request_pending(vcpu))
return 0; return 0;
/* /*
* We use MMU_RELOAD just to re-arm the ipte notifier for the * We use MMU_RELOAD just to re-arm the ipte notifier for the
......
@@ -48,28 +48,31 @@
#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
......
...@@ -6731,7 +6731,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -6731,7 +6731,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
bool req_immediate_exit = false; bool req_immediate_exit = false;
if (vcpu->requests) { if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu); kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
...@@ -6895,7 +6895,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -6895,7 +6895,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->sync_pir_to_irr(vcpu); kvm_x86_ops->sync_pir_to_irr(vcpu);
} }
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
|| need_resched() || signal_pending(current)) { || need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb(); smp_wmb();
......
@@ -57,9 +57,7 @@ struct arch_timer_cpu {

int kvm_timer_hyp_init(void);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
...@@ -70,6 +68,10 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); ...@@ -70,6 +68,10 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
void kvm_timer_schedule(struct kvm_vcpu *vcpu); void kvm_timer_schedule(struct kvm_vcpu *vcpu);
void kvm_timer_unschedule(struct kvm_vcpu *vcpu); void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
......
...@@ -35,6 +35,7 @@ struct kvm_pmu { ...@@ -35,6 +35,7 @@ struct kvm_pmu {
int irq_num; int irq_num;
struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
bool ready; bool ready;
bool created;
bool irq_level; bool irq_level;
}; };
...@@ -63,6 +64,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, ...@@ -63,6 +64,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr); struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr); struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else #else
struct kvm_pmu { struct kvm_pmu {
}; };
...@@ -112,6 +114,10 @@ static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, ...@@ -112,6 +114,10 @@ static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
{ {
return -ENXIO; return -ENXIO;
} }
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
return 0;
}
#endif #endif
#endif #endif
...@@ -38,6 +38,10 @@ ...@@ -38,6 +38,10 @@
#define VGIC_MIN_LPI 8192 #define VGIC_MIN_LPI 8192
#define KVM_IRQCHIP_NUM_PINS (1020 - 32) #define KVM_IRQCHIP_NUM_PINS (1020 - 32)
#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
(irq) <= VGIC_MAX_SPI)
enum vgic_type { enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */ VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */ VGIC_V3, /* New fancy GICv3 */
...@@ -119,6 +123,9 @@ struct vgic_irq { ...@@ -119,6 +123,9 @@ struct vgic_irq {
u8 source; /* GICv2 SGIs only */ u8 source; /* GICv2 SGIs only */
u8 priority; u8 priority;
enum vgic_irq_config config; /* Level or edge */ enum vgic_irq_config config; /* Level or edge */
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
}; };
struct vgic_register_region; struct vgic_register_region;
...@@ -285,6 +292,7 @@ struct vgic_cpu { ...@@ -285,6 +292,7 @@ struct vgic_cpu {
}; };
extern struct static_key_false vgic_v2_cpuif_trap; extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm); void kvm_vgic_early_init(struct kvm *kvm);
...@@ -298,9 +306,7 @@ int kvm_vgic_hyp_init(void); ...@@ -298,9 +306,7 @@ int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void); void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level); bool level, void *owner);
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq); bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
...@@ -341,4 +347,6 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); ...@@ -341,4 +347,6 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
*/ */
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm); int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
#endif /* __KVM_ARM_VGIC_H */ #endif /* __KVM_ARM_VGIC_H */
...@@ -405,6 +405,7 @@ ...@@ -405,6 +405,7 @@
#define ICH_LR_PHYS_ID_SHIFT 32 #define ICH_LR_PHYS_ID_SHIFT 32
#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_LR_PRIORITY_SHIFT 48 #define ICH_LR_PRIORITY_SHIFT 48
#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
/* These are for GICv2 emulation only */ /* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0) #define GICH_LR_VIRTUALID (0x3ffUL << 0)
...@@ -416,7 +417,16 @@ ...@@ -416,7 +417,16 @@
#define ICH_HCR_EN (1 << 0) #define ICH_HCR_EN (1 << 0)
#define ICH_HCR_UIE (1 << 1) #define ICH_HCR_UIE (1 << 1)
#define ICH_HCR_TC (1 << 10)
#define ICH_HCR_TALL0 (1 << 11)
#define ICH_HCR_TALL1 (1 << 12)
#define ICH_HCR_EOIcount_SHIFT 27
#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
#define ICH_VMCR_ACK_CTL_SHIFT 2
#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
#define ICH_VMCR_FIQ_EN_SHIFT 3
#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
#define ICH_VMCR_CBPR_SHIFT 4 #define ICH_VMCR_CBPR_SHIFT 4
#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) #define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
#define ICH_VMCR_EOIM_SHIFT 9 #define ICH_VMCR_EOIM_SHIFT 9
......
...@@ -25,7 +25,18 @@ ...@@ -25,7 +25,18 @@
#define GICC_ENABLE 0x1 #define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0 #define GICC_INT_PRI_THRESHOLD 0xf0
#define GIC_CPU_CTRL_EOImodeNS (1 << 9) #define GIC_CPU_CTRL_EnableGrp0_SHIFT 0
#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1
#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
#define GIC_CPU_CTRL_AckCtl_SHIFT 2
#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT)
#define GIC_CPU_CTRL_FIQEn_SHIFT 3
#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT)
#define GIC_CPU_CTRL_CBPR_SHIFT 4
#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT)
#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9
#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
#define GICC_IAR_INT_ID_MASK 0x3ff #define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023 #define GICC_INT_SPURIOUS 1023
...@@ -84,8 +95,19 @@ ...@@ -84,8 +95,19 @@
#define GICH_LR_EOI			(1 << 19)
#define GICH_LR_HW			(1 << 31)

#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
#define GICH_VMCR_ENABLE_GRP0_MASK	(1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
#define GICH_VMCR_ENABLE_GRP1_SHIFT	1
#define GICH_VMCR_ENABLE_GRP1_MASK	(1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
#define GICH_VMCR_ACK_CTL_SHIFT		2
#define GICH_VMCR_ACK_CTL_MASK		(1 << GICH_VMCR_ACK_CTL_SHIFT)
#define GICH_VMCR_FIQ_EN_SHIFT		3
#define GICH_VMCR_FIQ_EN_MASK		(1 << GICH_VMCR_FIQ_EN_SHIFT)
#define GICH_VMCR_CBPR_SHIFT		4
#define GICH_VMCR_CBPR_MASK		(1 << GICH_VMCR_CBPR_SHIFT)
#define GICH_VMCR_EOI_MODE_SHIFT	9
#define GICH_VMCR_EOI_MODE_MASK		(1 << GICH_VMCR_EOI_MODE_SHIFT)
#define GICH_VMCR_PRIMASK_SHIFT		27
#define GICH_VMCR_PRIMASK_MASK		(0x1f << GICH_VMCR_PRIMASK_SHIFT)
#define GICH_VMCR_BINPOINT_SHIFT	21
......
...@@ -126,6 +126,13 @@ static inline bool is_error_page(struct page *page) ...@@ -126,6 +126,13 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER 2 #define KVM_REQ_PENDING_TIMER 2
#define KVM_REQ_UNHALT 3 #define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
#define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
...@@ -1098,6 +1105,11 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) ...@@ -1098,6 +1105,11 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req & KVM_REQUEST_MASK, &vcpu->requests); set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
} }
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
}
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{ {
return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests); return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
......
...@@ -60,7 +60,7 @@ static const unsigned short cc_map[16] = { ...@@ -60,7 +60,7 @@ static const unsigned short cc_map[16] = {
/* /*
* Check if a trapped instruction should have been executed or not. * Check if a trapped instruction should have been executed or not.
*/ */
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{ {
unsigned long cpsr; unsigned long cpsr;
u32 cpsr_cond; u32 cpsr_cond;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/uaccess.h>
#include <clocksource/arm_arch_timer.h> #include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h> #include <asm/arch_timer.h>
...@@ -35,6 +36,16 @@ static struct timecounter *timecounter; ...@@ -35,6 +36,16 @@ static struct timecounter *timecounter;
static unsigned int host_vtimer_irq; static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags; static u32 host_vtimer_irq_flags;
static const struct kvm_irq_level default_ptimer_irq = {
.irq = 30,
.level = 1,
};
static const struct kvm_irq_level default_vtimer_irq = {
.irq = 27,
.level = 1,
};
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{ {
vcpu_vtimer(vcpu)->active_cleared_last = false; vcpu_vtimer(vcpu)->active_cleared_last = false;
@@ -95,7 +106,7 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
	 * If the vcpu is blocked we want to wake it up so that it will see
	 * the timer has expired when entering the guest.
	 */
	kvm_vcpu_wake_up(vcpu);
}
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx) static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
...@@ -215,7 +226,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, ...@@ -215,7 +226,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
if (likely(irqchip_in_kernel(vcpu->kvm))) { if (likely(irqchip_in_kernel(vcpu->kvm))) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
timer_ctx->irq.irq, timer_ctx->irq.irq,
timer_ctx->irq.level); timer_ctx->irq.level,
timer_ctx);
WARN_ON(ret); WARN_ON(ret);
} }
} }
...@@ -445,22 +457,11 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) ...@@ -445,22 +457,11 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
kvm_timer_update_state(vcpu); kvm_timer_update_state(vcpu);
} }
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
-			 const struct kvm_irq_level *virt_irq,
-			 const struct kvm_irq_level *phys_irq)
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{ {
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-	/*
-	 * The vcpu timer irq number cannot be determined in
-	 * kvm_timer_vcpu_init() because it is called much before
-	 * kvm_vcpu_set_target(). To handle this, we determine
-	 * vcpu timer irq number when the vcpu is reset.
-	 */
-	vtimer->irq.irq = virt_irq->irq;
-	ptimer->irq.irq = phys_irq->irq;
/* /*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
* and to 0 for ARMv7. We provide an implementation that always * and to 0 for ARMv7. We provide an implementation that always
...@@ -496,6 +497,8 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) ...@@ -496,6 +497,8 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{ {
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/* Synchronize cntvoff across all vtimers of a VM. */ /* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
...@@ -504,6 +507,9 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -504,6 +507,9 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->timer.function = kvm_timer_expire; timer->timer.function = kvm_timer_expire;
vtimer->irq.irq = default_vtimer_irq.irq;
ptimer->irq.irq = default_ptimer_irq.irq;
} }
static void kvm_timer_init_interrupt(void *info) static void kvm_timer_init_interrupt(void *info)
...@@ -613,6 +619,30 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) ...@@ -613,6 +619,30 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq); kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
} }
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
int vtimer_irq, ptimer_irq;
int i, ret;
vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
if (ret)
return false;
ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
if (ret)
return false;
kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
return false;
}
return true;
}
int kvm_timer_enable(struct kvm_vcpu *vcpu) int kvm_timer_enable(struct kvm_vcpu *vcpu)
{ {
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
...@@ -632,6 +662,11 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) ...@@ -632,6 +662,11 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (!vgic_initialized(vcpu->kvm)) if (!vgic_initialized(vcpu->kvm))
return -ENODEV; return -ENODEV;
if (!timer_irqs_are_valid(vcpu)) {
kvm_debug("incorrectly configured timer irqs\n");
return -EINVAL;
}
/* /*
* Find the physical IRQ number corresponding to the host_vtimer_irq * Find the physical IRQ number corresponding to the host_vtimer_irq
*/ */
...@@ -681,3 +716,79 @@ void kvm_timer_init_vhe(void) ...@@ -681,3 +716,79 @@ void kvm_timer_init_vhe(void)
val |= (CNTHCTL_EL1PCTEN << cnthctl_shift); val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
write_sysreg(val, cnthctl_el2); write_sysreg(val, cnthctl_el2);
} }
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
struct kvm_vcpu *vcpu;
int i;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
}
}
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
int __user *uaddr = (int __user *)(long)attr->addr;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
int irq;
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
if (get_user(irq, uaddr))
return -EFAULT;
if (!(irq_is_ppi(irq)))
return -EINVAL;
if (vcpu->arch.timer_cpu.enabled)
return -EBUSY;
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
break;
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
break;
default:
return -ENXIO;
}
return 0;
}
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
int __user *uaddr = (int __user *)(long)attr->addr;
struct arch_timer_context *timer;
int irq;
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
timer = vcpu_vtimer(vcpu);
break;
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
timer = vcpu_ptimer(vcpu);
break;
default:
return -ENXIO;
}
irq = timer->irq.irq;
return put_user(irq, uaddr);
}
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
switch (attr->attr) {
case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
return 0;
}
return -ENXIO;
}
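The attribute handlers above are driven from userspace through the per-vcpu KVM_SET_DEVICE_ATTR ioctl. A minimal sketch of that call, assuming the KVM_ARM_VCPU_TIMER_CTRL group and KVM_ARM_VCPU_TIMER_IRQ_VTIMER attribute names used by this series and a vcpu_fd obtained from KVM_CREATE_VCPU; the IRQ must be set before the vcpu first runs, since the handler rejects changes with -EBUSY once the timer is enabled:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Set the virtual timer PPI for the whole VM, e.g. irq = 27. */
static int set_vtimer_irq(int vcpu_fd, int irq)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_TIMER_CTRL,
		.attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
		.addr  = (__u64)(unsigned long)&irq,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}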
...@@ -368,6 +368,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) ...@@ -368,6 +368,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_timer_vcpu_put(vcpu); kvm_timer_vcpu_put(vcpu);
} }
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.power_off = true;
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
...@@ -387,7 +394,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, ...@@ -387,7 +394,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu->arch.power_off = false; vcpu->arch.power_off = false;
break; break;
case KVM_MP_STATE_STOPPED: case KVM_MP_STATE_STOPPED:
-		vcpu->arch.power_off = true;
+		vcpu_power_off(vcpu);
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -520,6 +527,10 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ...@@ -520,6 +527,10 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
} }
ret = kvm_timer_enable(vcpu); ret = kvm_timer_enable(vcpu);
if (ret)
return ret;
ret = kvm_arm_pmu_v3_enable(vcpu);
return ret; return ret;
} }
...@@ -536,21 +547,7 @@ void kvm_arm_halt_guest(struct kvm *kvm) ...@@ -536,21 +547,7 @@ void kvm_arm_halt_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) kvm_for_each_vcpu(i, vcpu, kvm)
vcpu->arch.pause = true; vcpu->arch.pause = true;
-	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
{
vcpu->arch.pause = true;
kvm_vcpu_kick(vcpu);
}
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
vcpu->arch.pause = false;
swake_up(wq);
} }
void kvm_arm_resume_guest(struct kvm *kvm) void kvm_arm_resume_guest(struct kvm *kvm)
...@@ -558,16 +555,23 @@ void kvm_arm_resume_guest(struct kvm *kvm) ...@@ -558,16 +555,23 @@ void kvm_arm_resume_guest(struct kvm *kvm)
int i; int i;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arm_resume_vcpu(vcpu);
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		vcpu->arch.pause = false;
swake_up(kvm_arch_vcpu_wq(vcpu));
}
} }
-static void vcpu_sleep(struct kvm_vcpu *vcpu)
+static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{ {
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu); struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
swait_event_interruptible(*wq, ((!vcpu->arch.power_off) && swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause))); (!vcpu->arch.pause)));
if (vcpu->arch.power_off || vcpu->arch.pause) {
/* Awaken to handle a signal, request we sleep again later. */
kvm_make_request(KVM_REQ_SLEEP, vcpu);
}
} }
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
...@@ -575,6 +579,20 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) ...@@ -575,6 +579,20 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
return vcpu->arch.target >= 0; return vcpu->arch.target >= 0;
} }
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
vcpu_req_sleep(vcpu);
/*
* Clear IRQ_PENDING requests that were made to guarantee
* that a VCPU sees new virtual interrupts.
*/
kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
}
}
/** /**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer * @vcpu: The VCPU pointer
...@@ -620,8 +638,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -620,8 +638,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
update_vttbr(vcpu->kvm); update_vttbr(vcpu->kvm);
-		if (vcpu->arch.power_off || vcpu->arch.pause)
-			vcpu_sleep(vcpu);
+		check_vcpu_requests(vcpu);
/* /*
* Preparing the interrupts to be injected also * Preparing the interrupts to be injected also
...@@ -650,8 +667,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -650,8 +667,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_INTR; run->exit_reason = KVM_EXIT_INTR;
} }
/*
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
* See the comment in kvm_vcpu_exiting_guest_mode() and
* Documentation/virtual/kvm/vcpu-requests.rst
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
-		    vcpu->arch.power_off || vcpu->arch.pause) {
+		    kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable(); local_irq_enable();
kvm_pmu_sync_hwstate(vcpu); kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu);
...@@ -667,7 +693,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -667,7 +693,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/ */
trace_kvm_entry(*vcpu_pc(vcpu)); trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff(); guest_enter_irqoff();
-		vcpu->mode = IN_GUEST_MODE;
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
...@@ -756,6 +781,7 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) ...@@ -756,6 +781,7 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
* trigger a world-switch round on the running physical CPU to set the * trigger a world-switch round on the running physical CPU to set the
* virtual IRQ/FIQ fields in the HCR appropriately. * virtual IRQ/FIQ fields in the HCR appropriately.
*/ */
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu); kvm_vcpu_kick(vcpu);
return 0; return 0;
...@@ -806,7 +832,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, ...@@ -806,7 +832,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
return -EINVAL; return -EINVAL;
-		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
+		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
case KVM_ARM_IRQ_TYPE_SPI: case KVM_ARM_IRQ_TYPE_SPI:
if (!irqchip_in_kernel(kvm)) if (!irqchip_in_kernel(kvm))
return -ENXIO; return -ENXIO;
...@@ -814,7 +840,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, ...@@ -814,7 +840,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (irq_num < VGIC_NR_PRIVATE_IRQS) if (irq_num < VGIC_NR_PRIVATE_IRQS)
return -EINVAL; return -EINVAL;
-		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
} }
return -EINVAL; return -EINVAL;
...@@ -884,7 +910,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, ...@@ -884,7 +910,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Handle the "start in power-off" case. * Handle the "start in power-off" case.
*/ */
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
-		vcpu->arch.power_off = true;
+		vcpu_power_off(vcpu);
else else
vcpu->arch.power_off = false; vcpu->arch.power_off = false;
...@@ -1115,9 +1141,6 @@ static void cpu_init_hyp_mode(void *dummy) ...@@ -1115,9 +1141,6 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2(); __cpu_init_stage2();
if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
kvm_arm_init_debug(); kvm_arm_init_debug();
} }
...@@ -1137,6 +1160,7 @@ static void cpu_hyp_reinit(void) ...@@ -1137,6 +1160,7 @@ static void cpu_hyp_reinit(void)
* event was cancelled before the CPU was reset. * event was cancelled before the CPU was reset.
*/ */
__cpu_init_stage2(); __cpu_init_stage2();
kvm_timer_init_vhe();
} else { } else {
cpu_init_hyp_mode(NULL); cpu_init_hyp_mode(NULL);
} }
......
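The request machinery used above (KVM_REQ_SLEEP, KVM_REQ_IRQ_PENDING, the IN_GUEST_MODE store before the final check) relies on one ordering guarantee: either the requester observes IN_GUEST_MODE and kicks the vcpu, or the vcpu observes the pending request before entering the guest. Below is a standalone analogue of that handshake using C11 seq_cst atomics in place of kvm_make_request()/kvm_vcpu_kick() and smp_store_mb(); it only illustrates the protocol and is not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static atomic_int mode = OUTSIDE_GUEST_MODE;
static atomic_int request;
static atomic_int kicked;

static void *requester(void *arg)
{
	atomic_store(&request, 1);			/* kvm_make_request() */
	if (atomic_load(&mode) == IN_GUEST_MODE)
		atomic_store(&kicked, 1);		/* kvm_vcpu_kick() -> IPI */
	return NULL;
}

static void *vcpu_thread(void *arg)
{
	atomic_store(&mode, IN_GUEST_MODE);		/* smp_store_mb(vcpu->mode, ...) */
	if (atomic_load(&request))			/* kvm_request_pending() */
		atomic_store(&mode, OUTSIDE_GUEST_MODE);/* bail out before guest entry */
	/* else: enter the guest; the pending kick will force an exit */
	return NULL;
}

int main(void)
{
	pthread_t r, v;

	pthread_create(&r, NULL, requester, NULL);
	pthread_create(&v, NULL, vcpu_thread, NULL);
	pthread_join(r, NULL);
	pthread_join(v, NULL);

	/* One of the two must have fired: a request is never lost. */
	printf("request handled or vcpu kicked: %d\n",
	       atomic_load(&kicked) || atomic_load(&mode) == OUTSIDE_GUEST_MODE);
	return 0;
}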
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h> #include <trace/events/kvm.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -879,6 +880,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache ...@@ -879,6 +880,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pmd_t *pmd; pmd_t *pmd;
pud = stage2_get_pud(kvm, cache, addr); pud = stage2_get_pud(kvm, cache, addr);
if (!pud)
return NULL;
if (stage2_pud_none(*pud)) { if (stage2_pud_none(*pud)) {
if (!cache) if (!cache)
return NULL; return NULL;
...@@ -1258,6 +1262,24 @@ static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn, ...@@ -1258,6 +1262,24 @@ static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
__coherent_cache_guest_page(vcpu, pfn, size); __coherent_cache_guest_page(vcpu, pfn, size);
} }
static void kvm_send_hwpoison_signal(unsigned long address,
struct vm_area_struct *vma)
{
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_MCEERR_AR;
info.si_addr = (void __user *)address;
if (is_vm_hugetlb_page(vma))
info.si_addr_lsb = huge_page_shift(hstate_vma(vma));
else
info.si_addr_lsb = PAGE_SHIFT;
send_sig_info(SIGBUS, &info, current);
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva, struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status) unsigned long fault_status)
...@@ -1327,6 +1349,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1327,6 +1349,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
smp_rmb(); smp_rmb();
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma);
return 0;
}
if (is_error_noslot_pfn(pfn)) if (is_error_noslot_pfn(pfn))
return -EFAULT; return -EFAULT;
......
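The hwpoison path above replaces a plain -EFAULT with a SIGBUS carrying BUS_MCEERR_AR, the faulting address and an si_addr_lsb matching the backing page size. A hedged sketch of how a VMM might catch it; only the siginfo fields come from the code above, the recovery policy is an assumption:

#include <signal.h>

static volatile sig_atomic_t hwpoison_pending;
static void *hwpoison_addr;
static short hwpoison_lsb;

static void hwpoison_handler(int sig, siginfo_t *info, void *ctx)
{
	if (info->si_code == BUS_MCEERR_AR) {
		hwpoison_addr = info->si_addr;		/* poisoned guest mapping */
		hwpoison_lsb  = info->si_addr_lsb;	/* PAGE_SHIFT or hugepage shift */
		hwpoison_pending = 1;			/* main loop decides how to inject the error */
	}
}

static void install_hwpoison_handler(void)
{
	struct sigaction sa = {
		.sa_sigaction	= hwpoison_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGBUS, &sa, NULL);
}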
...@@ -203,6 +203,24 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) ...@@ -203,6 +203,24 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
return reg; return reg;
} }
static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool overflow = !!kvm_pmu_overflow_status(vcpu);
if (pmu->irq_level == overflow)
return;
pmu->irq_level = overflow;
if (likely(irqchip_in_kernel(vcpu->kvm))) {
int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
pmu->irq_num, overflow,
&vcpu->arch.pmu);
WARN_ON(ret);
}
}
/** /**
* kvm_pmu_overflow_set - set PMU overflow interrupt * kvm_pmu_overflow_set - set PMU overflow interrupt
* @vcpu: The vcpu pointer * @vcpu: The vcpu pointer
...@@ -210,37 +228,18 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) ...@@ -210,37 +228,18 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
*/ */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{ {
u64 reg;
if (val == 0) if (val == 0)
return; return;
vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val; vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
-	reg = kvm_pmu_overflow_status(vcpu);
-	if (reg != 0)
-		kvm_vcpu_kick(vcpu);
+	kvm_pmu_check_overflow(vcpu);
} }
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu) static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{ {
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool overflow;
if (!kvm_arm_pmu_v3_ready(vcpu)) if (!kvm_arm_pmu_v3_ready(vcpu))
return; return;
kvm_pmu_check_overflow(vcpu);
overflow = !!kvm_pmu_overflow_status(vcpu);
if (pmu->irq_level == overflow)
return;
pmu->irq_level = overflow;
if (likely(irqchip_in_kernel(vcpu->kvm))) {
int ret;
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
pmu->irq_num, overflow);
WARN_ON(ret);
}
} }
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
...@@ -451,34 +450,74 @@ bool kvm_arm_support_pmu_v3(void) ...@@ -451,34 +450,74 @@ bool kvm_arm_support_pmu_v3(void)
return (perf_num_counters() > 0); return (perf_num_counters() > 0);
} }
-static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
+int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_arm_support_pmu_v3())
-		return -ENODEV;
+	if (!vcpu->arch.pmu.created)
+		return 0;

 	/*
-	 * We currently require an in-kernel VGIC to use the PMU emulation,
-	 * because we do not support forwarding PMU overflow interrupts to
-	 * userspace yet.
+	 * A valid interrupt configuration for the PMU is either to have a
+	 * properly configured interrupt number and using an in-kernel
+	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
 	 */
-	if (!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))
+	if (irqchip_in_kernel(vcpu->kvm)) {
int irq = vcpu->arch.pmu.irq_num;
if (!kvm_arm_pmu_irq_initialized(vcpu))
return -EINVAL;
/*
* If we are using an in-kernel vgic, at this point we know
* the vgic will be initialized, so we can check the PMU irq
* number against the dimensions of the vgic and make sure
* it's valid.
*/
if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
return -EINVAL;
} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
return -EINVAL;
}
kvm_pmu_vcpu_reset(vcpu);
vcpu->arch.pmu.ready = true;
return 0;
}
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
if (!kvm_arm_support_pmu_v3())
return -ENODEV; return -ENODEV;
-	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
-	    !kvm_arm_pmu_irq_initialized(vcpu))
+	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return -ENXIO; return -ENXIO;
-	if (kvm_arm_pmu_v3_ready(vcpu))
+	if (vcpu->arch.pmu.created)
return -EBUSY; return -EBUSY;
-	kvm_pmu_vcpu_reset(vcpu);
-	vcpu->arch.pmu.ready = true;
+	if (irqchip_in_kernel(vcpu->kvm)) {
+		int ret;
/*
* If using the PMU with an in-kernel virtual GIC
* implementation, we require the GIC to be already
* initialized when initializing the PMU.
*/
if (!vgic_initialized(vcpu->kvm))
return -ENODEV;
if (!kvm_arm_pmu_irq_initialized(vcpu))
return -ENXIO;
ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
&vcpu->arch.pmu);
if (ret)
return ret;
}
vcpu->arch.pmu.created = true;
return 0; return 0;
} }
#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
/* /*
* For one VM the interrupt type must be same for each vcpu. * For one VM the interrupt type must be same for each vcpu.
* As a PPI, the interrupt number is the same for all vcpus, * As a PPI, the interrupt number is the same for all vcpus,
...@@ -512,6 +551,9 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) ...@@ -512,6 +551,9 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
int __user *uaddr = (int __user *)(long)attr->addr; int __user *uaddr = (int __user *)(long)attr->addr;
int irq; int irq;
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return -ENODEV; return -ENODEV;
...@@ -519,7 +561,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) ...@@ -519,7 +561,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -EFAULT; return -EFAULT;
/* The PMU overflow interrupt can be a PPI or a valid SPI. */ /* The PMU overflow interrupt can be a PPI or a valid SPI. */
-		if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
+		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
return -EINVAL; return -EINVAL;
if (!pmu_irq_is_valid(vcpu->kvm, irq)) if (!pmu_irq_is_valid(vcpu->kvm, irq))
...@@ -546,6 +588,9 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) ...@@ -546,6 +588,9 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
int __user *uaddr = (int __user *)(long)attr->addr; int __user *uaddr = (int __user *)(long)attr->addr;
int irq; int irq;
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return -ENODEV; return -ENODEV;
......
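As with the timer, the PMU attribute path now requires an in-kernel irqchip for any IRQ configuration, and kvm_arm_pmu_v3_enable() validates everything at first vcpu run. A rough userspace ordering sketch, assuming the pre-existing KVM_ARM_VCPU_PMU_V3_CTRL group and attribute names: set the overflow IRQ, initialize the in-kernel GIC, then issue KVM_ARM_VCPU_PMU_V3_INIT; with no in-kernel GIC, no IRQ may be set at all:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int pmu_setup(int vcpu_fd, int overflow_irq)	/* e.g. a PPI such as 23 */
{
	struct kvm_device_attr irq_attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (__u64)(unsigned long)&overflow_irq,
	};
	struct kvm_device_attr init_attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_INIT,
	};

	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &irq_attr))
		return -1;
	/* ... create and initialize the in-kernel GIC here ... */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &init_attr);
}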
...@@ -57,6 +57,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) ...@@ -57,6 +57,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
* for KVM will preserve the register state. * for KVM will preserve the register state.
*/ */
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
return PSCI_RET_SUCCESS; return PSCI_RET_SUCCESS;
} }
...@@ -64,6 +65,8 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) ...@@ -64,6 +65,8 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{ {
vcpu->arch.power_off = true; vcpu->arch.power_off = true;
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
} }
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
...@@ -178,10 +181,9 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) ...@@ -178,10 +181,9 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
* after this call is handled and before the VCPUs have been * after this call is handled and before the VCPUs have been
* re-initialized. * re-initialized.
*/ */
-	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
 		tmp->arch.power_off = true;
-		kvm_vcpu_kick(tmp);
-	}
+	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type; vcpu->run->system_event.type = type;
......
...@@ -34,7 +34,7 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e, ...@@ -34,7 +34,7 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
if (!vgic_valid_spi(kvm, spi_id)) if (!vgic_valid_spi(kvm, spi_id))
return -EINVAL; return -EINVAL;
-	return kvm_vgic_inject_irq(kvm, 0, spi_id, level);
+	return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL);
} }
/** /**
......
...@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu, ...@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
switch (addr & 0xff) { switch (addr & 0xff) {
case GIC_CPU_CTRL: case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
break; break;
case GIC_CPU_PRIMASK: case GIC_CPU_PRIMASK:
/* /*
...@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu, ...@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
switch (addr & 0xff) { switch (addr & 0xff) {
case GIC_CPU_CTRL: case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
break; break;
case GIC_CPU_PRIMASK: case GIC_CPU_PRIMASK:
/* /*
...@@ -296,34 +308,36 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { ...@@ -296,34 +308,36 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
vgic_mmio_read_rao, vgic_mmio_write_wi, 1, vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
vgic_mmio_read_enable, vgic_mmio_write_senable, 1, vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
vgic_mmio_read_pending, vgic_mmio_write_spending, 1, vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
vgic_mmio_read_active, vgic_mmio_write_sactive, 1, vgic_mmio_read_active, vgic_mmio_write_sactive,
NULL, vgic_mmio_uaccess_write_sactive, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
vgic_mmio_read_active, vgic_mmio_write_cactive, 1, vgic_mmio_read_active, vgic_mmio_write_cactive,
NULL, vgic_mmio_uaccess_write_cactive, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
vgic_mmio_read_priority, vgic_mmio_write_priority, 8, vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
vgic_mmio_read_target, vgic_mmio_write_target, 8, vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG, REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
vgic_mmio_read_config, vgic_mmio_write_config, 2, vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT, REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
vgic_mmio_read_raz, vgic_mmio_write_sgir, 4, vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
......
...@@ -456,11 +456,13 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { ...@@ -456,11 +456,13 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
vgic_mmio_read_raz, vgic_mmio_write_wi, 1, vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER, REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
vgic_mmio_read_active, vgic_mmio_write_sactive, NULL, NULL, 1, vgic_mmio_read_active, vgic_mmio_write_sactive,
NULL, vgic_mmio_uaccess_write_sactive, 1,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER, REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
vgic_mmio_read_active, vgic_mmio_write_cactive, NULL, NULL, 1, vgic_mmio_read_active, vgic_mmio_write_cactive,
VGIC_ACCESS_32bit), NULL, vgic_mmio_uaccess_write_cactive,
1, VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR, REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL, vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
...@@ -526,12 +528,14 @@ static const struct vgic_register_region vgic_v3_sgibase_registers[] = { ...@@ -526,12 +528,14 @@ static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
vgic_mmio_read_pending, vgic_mmio_write_cpending, vgic_mmio_read_pending, vgic_mmio_write_cpending,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4, vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit), VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0, REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
vgic_mmio_read_active, vgic_mmio_write_sactive, 4, vgic_mmio_read_active, vgic_mmio_write_sactive,
VGIC_ACCESS_32bit), NULL, vgic_mmio_uaccess_write_sactive,
REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0, 4, VGIC_ACCESS_32bit),
vgic_mmio_read_active, vgic_mmio_write_cactive, 4, REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
VGIC_ACCESS_32bit), vgic_mmio_read_active, vgic_mmio_write_cactive,
NULL, vgic_mmio_uaccess_write_cactive,
4, VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0, REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
vgic_mmio_read_priority, vgic_mmio_write_priority, 32, vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
......
...@@ -231,56 +231,94 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, ...@@ -231,56 +231,94 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
* be migrated while we don't hold the IRQ locks and we don't want to be * be migrated while we don't hold the IRQ locks and we don't want to be
* chasing moving targets. * chasing moving targets.
* *
- * For private interrupts, we only have to make sure the single and only VCPU
- * that can potentially queue the IRQ is stopped.
+ * For private interrupts we don't have to do anything because userspace
+ * accesses to the VGIC state already require all VCPUs to be stopped, and
* only the VCPU itself can modify its private interrupts active state, which
* guarantees that the VCPU is not running.
*/ */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{ {
-	if (intid < VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_halt_vcpu(vcpu);
-	else
+	if (intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_halt_guest(vcpu->kvm); kvm_arm_halt_guest(vcpu->kvm);
} }
/* See vgic_change_active_prepare */ /* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{ {
-	if (intid < VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_resume_vcpu(vcpu);
-	else
+	if (intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_resume_guest(vcpu->kvm); kvm_arm_resume_guest(vcpu->kvm);
} }
-void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len, gpa_t addr, unsigned int len,
unsigned long val) unsigned long val)
{ {
u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i; int i;
vgic_change_active_prepare(vcpu, intid);
for_each_set_bit(i, &val, len * 8) { for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, false); vgic_mmio_change_active(vcpu, irq, false);
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
} }
vgic_change_active_finish(vcpu, intid);
} }
-void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len, gpa_t addr, unsigned int len,
unsigned long val) unsigned long val)
{ {
u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
mutex_lock(&vcpu->kvm->lock);
vgic_change_active_prepare(vcpu, intid); vgic_change_active_prepare(vcpu, intid);
__vgic_mmio_write_cactive(vcpu, addr, len, val);
vgic_change_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
}
void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
__vgic_mmio_write_cactive(vcpu, addr, len, val);
}
static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
for_each_set_bit(i, &val, len * 8) { for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, true); vgic_mmio_change_active(vcpu, irq, true);
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
} }
}
void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->lock);
vgic_change_active_prepare(vcpu, intid);
__vgic_mmio_write_sactive(vcpu, addr, len, val);
vgic_change_active_finish(vcpu, intid); vgic_change_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
}
void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
__vgic_mmio_write_sactive(vcpu, addr, len, val);
} }
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
......
...@@ -75,7 +75,7 @@ extern struct kvm_io_device_ops kvm_io_gic_ops; ...@@ -75,7 +75,7 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
* The _WITH_LENGTH version instantiates registers with a fixed length * The _WITH_LENGTH version instantiates registers with a fixed length
* and is mutually exclusive with the _PER_IRQ version. * and is mutually exclusive with the _PER_IRQ version.
*/ */
-#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \
+#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc) \
{ \ { \
.reg_offset = off, \ .reg_offset = off, \
.bits_per_irq = bpi, \ .bits_per_irq = bpi, \
...@@ -83,6 +83,8 @@ extern struct kvm_io_device_ops kvm_io_gic_ops; ...@@ -83,6 +83,8 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
.access_flags = acc, \ .access_flags = acc, \
.read = rd, \ .read = rd, \
.write = wr, \ .write = wr, \
.uaccess_read = ur, \
.uaccess_write = uw, \
} }
#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \ #define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \
...@@ -165,6 +167,14 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, ...@@ -165,6 +167,14 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len, gpa_t addr, unsigned int len,
unsigned long val); unsigned long val);
void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len); gpa_t addr, unsigned int len);
......
...@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ...@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
u32 vmcr; u32 vmcr;
-	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
GICH_VMCR_ENABLE_GRP0_MASK;
vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
GICH_VMCR_ENABLE_GRP1_MASK;
vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
GICH_VMCR_ACK_CTL_MASK;
vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
GICH_VMCR_FIQ_EN_MASK;
vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
GICH_VMCR_CBPR_MASK;
vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
GICH_VMCR_EOI_MODE_MASK;
vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
GICH_VMCR_ALIAS_BINPOINT_MASK; GICH_VMCR_ALIAS_BINPOINT_MASK;
vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
...@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ...@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vmcr = cpu_if->vgic_vmcr; vmcr = cpu_if->vgic_vmcr;
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+			GICH_VMCR_ENABLE_GRP0_SHIFT;
vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
GICH_VMCR_ENABLE_GRP1_SHIFT;
vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
GICH_VMCR_ACK_CTL_SHIFT;
vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
GICH_VMCR_FIQ_EN_SHIFT;
vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
GICH_VMCR_CBPR_SHIFT;
vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
GICH_VMCR_EOI_MODE_SHIFT;
vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
GICH_VMCR_ALIAS_BINPOINT_SHIFT; GICH_VMCR_ALIAS_BINPOINT_SHIFT;
vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
......
...@@ -21,6 +21,10 @@ ...@@ -21,6 +21,10 @@
#include "vgic.h" #include "vgic.h"
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{ {
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
...@@ -159,15 +163,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) ...@@ -159,15 +163,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{ {
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u32 vmcr; u32 vmcr;
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
* When emulating GICv3 on GICv3 with SRE=1 on the
* VFIQEn bit is RES1 and the VAckCtl bit is RES0.
*/
vmcr = ICH_VMCR_FIQ_EN_MASK;
}
vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
...@@ -180,17 +193,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ...@@ -180,17 +193,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{ {
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u32 vmcr; u32 vmcr;
vmcr = cpu_if->vgic_vmcr; vmcr = cpu_if->vgic_vmcr;
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-			ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+				ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+				ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
* When emulating GICv3 on GICv3 with SRE=1 on the
* VFIQEn bit is RES1 and the VAckCtl bit is RES0.
*/
vmcrp->fiqen = 1;
vmcrp->ackctl = 0;
}
vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
...@@ -239,6 +262,12 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) ...@@ -239,6 +262,12 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
/* Get the show on the road... */ /* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN; vgic_v3->vgic_hcr = ICH_HCR_EN;
if (group0_trap)
vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
if (group1_trap)
vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
if (common_trap)
vgic_v3->vgic_hcr |= ICH_HCR_TC;
} }
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
...@@ -410,6 +439,26 @@ int vgic_v3_map_resources(struct kvm *kvm) ...@@ -410,6 +439,26 @@ int vgic_v3_map_resources(struct kvm *kvm)
return ret; return ret;
} }
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
static int __init early_group0_trap_cfg(char *buf)
{
return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);
static int __init early_group1_trap_cfg(char *buf)
{
return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);
static int __init early_common_trap_cfg(char *buf)
{
return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
/** /**
* vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node * @node: pointer to the DT node
...@@ -461,6 +510,21 @@ int vgic_v3_probe(const struct gic_kvm_info *info) ...@@ -461,6 +510,21 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (kvm_vgic_global_state.vcpu_base == 0) if (kvm_vgic_global_state.vcpu_base == 0)
kvm_info("disabling GICv2 emulation\n"); kvm_info("disabling GICv2 emulation\n");
#ifdef CONFIG_ARM64
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
group0_trap = true;
group1_trap = true;
}
#endif
if (group0_trap || group1_trap || common_trap) {
kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
group0_trap ? "G0" : "",
group1_trap ? "G1" : "",
common_trap ? "C" : "");
static_branch_enable(&vgic_v3_cpuif_trap);
}
kvm_vgic_global_state.vctrl_base = NULL; kvm_vgic_global_state.vctrl_base = NULL;
kvm_vgic_global_state.type = VGIC_V3; kvm_vgic_global_state.type = VGIC_V3;
kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS; kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
......
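For reference, the three early_params above are toggled from the kernel command line (for example booting with kvm-arm.vgic_v3_group1_trap=1), and on parts affected by Cavium erratum 30115 the group-0 and group-1 traps are enabled automatically; either way guest GICv3 system-register accesses now trap to the hypervisor, which is why the probe path prints the reduced-performance message.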
...@@ -35,11 +35,12 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { ...@@ -35,11 +35,12 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/* /*
* Locking order is always: * Locking order is always:
- *   its->cmd_lock (mutex)
- *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock
- *         kvm->lpi_list_lock
- *           vgic_irq->irq_lock
+ * kvm->lock (mutex)
+ *   its->cmd_lock (mutex)
+ *     its->its_lock (mutex)
+ *       vgic_cpu->ap_list_lock
+ *         kvm->lpi_list_lock
+ *           vgic_irq->irq_lock
* *
* If you need to take multiple locks, always take the upper lock first, * If you need to take multiple locks, always take the upper lock first,
* then the lower ones, e.g. first take the its_lock, then the irq_lock. * then the lower ones, e.g. first take the its_lock, then the irq_lock.
...@@ -234,10 +235,14 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu) ...@@ -234,10 +235,14 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
/* /*
* Only valid injection if changing level for level-triggered IRQs or for a * Only valid injection if changing level for level-triggered IRQs or for a
- * rising edge.
+ * rising edge, and in-kernel connected IRQ lines can only be controlled by
+ * their owner.
  */
-static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
+static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{ {
if (irq->owner != owner)
return false;
switch (irq->config) { switch (irq->config) {
case VGIC_CONFIG_LEVEL: case VGIC_CONFIG_LEVEL:
return irq->line_level != level; return irq->line_level != level;
...@@ -285,8 +290,10 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) ...@@ -285,8 +290,10 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
* won't see this one until it exits for some other * won't see this one until it exits for some other
* reason. * reason.
*/ */
if (vcpu) if (vcpu) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu); kvm_vcpu_kick(vcpu);
}
return false; return false;
} }
...@@ -332,6 +339,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) ...@@ -332,6 +339,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
spin_unlock(&irq->irq_lock); spin_unlock(&irq->irq_lock);
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu); kvm_vcpu_kick(vcpu);
return true; return true;
...@@ -346,13 +354,16 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) ...@@ -346,13 +354,16 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
* false: to ignore the call * false: to ignore the call
* Level-sensitive true: raise the input signal * Level-sensitive true: raise the input signal
* false: lower the input signal * false: lower the input signal
* @owner: The opaque pointer to the owner of the IRQ being raised to verify
* that the caller is allowed to inject this IRQ. Userspace
* injections will have owner == NULL.
* *
* The VGIC is not concerned with devices being active-LOW or active-HIGH for * The VGIC is not concerned with devices being active-LOW or active-HIGH for
* level-sensitive interrupts. You can think of the level parameter as 1 * level-sensitive interrupts. You can think of the level parameter as 1
* being HIGH and 0 being LOW and all devices being active-HIGH. * being HIGH and 0 being LOW and all devices being active-HIGH.
*/ */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
-			bool level)
+			bool level, void *owner)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct vgic_irq *irq; struct vgic_irq *irq;
...@@ -374,7 +385,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, ...@@ -374,7 +385,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
spin_lock(&irq->irq_lock); spin_lock(&irq->irq_lock);
-	if (!vgic_validate_injection(irq, level)) {
+	if (!vgic_validate_injection(irq, level, owner)) {
/* Nothing to see here, move along... */ /* Nothing to see here, move along... */
spin_unlock(&irq->irq_lock); spin_unlock(&irq->irq_lock);
vgic_put_irq(kvm, irq); vgic_put_irq(kvm, irq);
...@@ -430,6 +441,39 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) ...@@ -430,6 +441,39 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
return 0; return 0;
} }
/**
* kvm_vgic_set_owner - Set the owner of an interrupt for a VM
*
* @vcpu: Pointer to the VCPU (used for PPIs)
* @intid: The virtual INTID identifying the interrupt (PPI or SPI)
* @owner: Opaque pointer to the owner
*
* Returns 0 if intid is not already used by another in-kernel device and the
* owner is set, otherwise returns an error code.
*/
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
struct vgic_irq *irq;
int ret = 0;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
/* SGIs and LPIs cannot be wired up to any device */
if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
return -EINVAL;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
spin_lock(&irq->irq_lock);
if (irq->owner && irq->owner != owner)
ret = -EEXIST;
else
irq->owner = owner;
spin_unlock(&irq->irq_lock);
return ret;
}
/** /**
* vgic_prune_ap_list - Remove non-relevant interrupts from the list * vgic_prune_ap_list - Remove non-relevant interrupts from the list
* *
...@@ -721,8 +765,10 @@ void vgic_kick_vcpus(struct kvm *kvm) ...@@ -721,8 +765,10 @@ void vgic_kick_vcpus(struct kvm *kvm)
* a good kick... * a good kick...
*/ */
kvm_for_each_vcpu(c, vcpu, kvm) { kvm_for_each_vcpu(c, vcpu, kvm) {
if (kvm_vgic_vcpu_pending_irq(vcpu)) if (kvm_vgic_vcpu_pending_irq(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu); kvm_vcpu_kick(vcpu);
}
} }
} }
......
...@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq) ...@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
* registers regardless of the hardware backed GIC used. * registers regardless of the hardware backed GIC used.
*/ */
struct vgic_vmcr { struct vgic_vmcr {
-	u32 ctlr;
+	u32 grpen0;
u32 grpen1;
u32 ackctl;
u32 fiqen;
u32 cbpr;
u32 eoim;
u32 abpr; u32 abpr;
u32 bpr; u32 bpr;
u32 pmr; /* Priority mask field in the GICC_PMR and u32 pmr; /* Priority mask field in the GICC_PMR and
* ICC_PMR_EL1 priority field format */ * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32 grpen0;
-	u32 grpen1;
}; };
struct vgic_reg_attr { struct vgic_reg_attr {
......