Commit 6f2f10ca authored by Marc Zyngier

Merge branch 'kvmarm-master/master' into HEAD

parents ebb127f2 33b5c388
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
......
@@ -32,6 +32,7 @@
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
+#define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "coproc.h"
@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
  * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
  * all PM registers, which doesn't crash the guest kernel at least.
  */
-static bool pm_fake(struct kvm_vcpu *vcpu,
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 		    const struct coproc_params *p,
 		    const struct coproc_reg *r)
 {
@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 	return read_zero(vcpu, p);
 }
-#define access_pmcr pm_fake
-#define access_pmcntenset pm_fake
-#define access_pmcntenclr pm_fake
-#define access_pmovsr pm_fake
-#define access_pmselr pm_fake
-#define access_pmceid0 pm_fake
-#define access_pmceid1 pm_fake
-#define access_pmccntr pm_fake
-#define access_pmxevtyper pm_fake
-#define access_pmxevcntr pm_fake
-#define access_pmuserenr pm_fake
-#define access_pmintenset pm_fake
-#define access_pmintenclr pm_fake
+#define access_pmcr trap_raz_wi
+#define access_pmcntenset trap_raz_wi
+#define access_pmcntenclr trap_raz_wi
+#define access_pmovsr trap_raz_wi
+#define access_pmselr trap_raz_wi
+#define access_pmceid0 trap_raz_wi
+#define access_pmceid1 trap_raz_wi
+#define access_pmccntr trap_raz_wi
+#define access_pmxevtyper trap_raz_wi
+#define access_pmxevcntr trap_raz_wi
+#define access_pmuserenr trap_raz_wi
+#define access_pmintenset trap_raz_wi
+#define access_pmintenclr trap_raz_wi
 /* Architected CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the
@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 	return 1;
 }
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
+
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {
@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 		table[i].reset(vcpu, &table[i]);
 }
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
+
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
......
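Note: both new cp14 handlers lean on trap_raz_wi(), whose tail is visible in the hunk above. For context, the helper reads roughly as follows (reconstructed from the surrounding hunks; ignore_write() is the existing coproc.c helper and is not shown in this excerpt, so treat this as an illustrative sketch rather than the committed code verbatim):

	static bool trap_raz_wi(struct kvm_vcpu *vcpu,
				const struct coproc_params *p,
				const struct coproc_reg *r)
	{
		/* WI: a guest write to the trapped register is discarded */
		if (p->is_write)
			return ignore_write(vcpu, p);
		/* RAZ: a guest read returns zero */
		else
			return read_zero(vcpu, p);
	}

In other words, a trapped guest mcr/mrc to a cp14 debug register is now accepted and ignored, instead of being turned into an undefined-instruction exception as the removed kvm_handle_cp14_access() did.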
@@ -96,9 +96,9 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_WFI] = kvm_handle_wfx,
 	[HSR_EC_CP15_32] = kvm_handle_cp15_32,
 	[HSR_EC_CP15_64] = kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR] = kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR] = kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64] = kvm_handle_cp14_access,
+	[HSR_EC_CP14_64] = kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
 	[HSR_EC_HVC] = handle_hvc,
......
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
......
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR;             /* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
......
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@ - Write permission implies XN: disabled
 	@ - Instruction cache: enabled
 	@ - Data/Unified cache: enabled
-	@ - Memory alignment checks: enabled
 	@ - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
-ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)	)
-THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)	)
+ARM(	ldr	r2, =(HSCTLR_M)	)
+THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)	)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
......
-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_KVM_H
 #include <linux/tracepoint.h>
@@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_KVM_H */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/arm/kvm
+#define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
......
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
......
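Aside: the RES1 mask added above covers SCTLR_EL2 bits 4, 5, 11, 16, 18, 22, 23, 28 and 29; the (1 << 16) term appears twice in the hunk, which is redundant but harmless since ORing the same bit twice changes nothing. A compile-time sanity check one could write against that definition (the _EXPECTED macro name below is made up for the illustration and is not part of the patch):

	#define SCTLR_EL2_RES1_EXPECTED	0x30c50830UL
	_Static_assert(SCTLR_EL2_RES1 == SCTLR_EL2_RES1_EXPECTED,
		       "RES1 mask covers bits 4,5,11,16,18,22,23,28,29");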
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
......
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
......
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
......
@@ -202,7 +202,10 @@ struct vgic_dist {
 		/* either a GICv2 CPU interface */
 		gpa_t		vgic_cpu_base;
 		/* or a number of GICv3 redistributor regions */
-		gpa_t		vgic_redist_base;
+		struct {
+			gpa_t	vgic_redist_base;
+			gpa_t	vgic_redist_free_offset;
+		};
 	};
 	/* distributor enabled */
......
@@ -417,6 +417,10 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
+#define ICH_VMCR_ACK_CTL_SHIFT		2
+#define ICH_VMCR_ACK_CTL_MASK		(1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT		3
+#define ICH_VMCR_FIQ_EN_MASK		(1 << ICH_VMCR_FIQ_EN_SHIFT)
 #define ICH_VMCR_CBPR_SHIFT		4
 #define ICH_VMCR_CBPR_MASK		(1 << ICH_VMCR_CBPR_SHIFT)
 #define ICH_VMCR_EOIM_SHIFT		9
......
@@ -25,7 +25,18 @@
 #define GICC_ENABLE			0x1
 #define GICC_INT_PRI_THRESHOLD		0xf0
-#define GIC_CPU_CTRL_EOImodeNS		(1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT	0
+#define GIC_CPU_CTRL_EnableGrp0		(1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT	1
+#define GIC_CPU_CTRL_EnableGrp1		(1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT	2
+#define GIC_CPU_CTRL_AckCtl		(1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT	3
+#define GIC_CPU_CTRL_FIQEn		(1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT		4
+#define GIC_CPU_CTRL_CBPR		(1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT	9
+#define GIC_CPU_CTRL_EOImodeNS		(1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
 #define GICC_IAR_INT_ID_MASK		0x3ff
 #define GICC_INT_SPURIOUS		1023
@@ -84,8 +95,19 @@
 #define GICH_LR_EOI			(1 << 19)
 #define GICH_LR_HW			(1 << 31)
-#define GICH_VMCR_CTRL_SHIFT		0
-#define GICH_VMCR_CTRL_MASK		(0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
+#define GICH_VMCR_ENABLE_GRP0_MASK	(1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT	1
+#define GICH_VMCR_ENABLE_GRP1_MASK	(1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT		2
+#define GICH_VMCR_ACK_CTL_MASK		(1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT		3
+#define GICH_VMCR_FIQ_EN_MASK		(1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT		4
+#define GICH_VMCR_CBPR_MASK		(1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT	9
+#define GICH_VMCR_EOI_MODE_MASK		(1 << GICH_VMCR_EOI_MODE_SHIFT)
 #define GICH_VMCR_PRIMASK_SHIFT		27
 #define GICH_VMCR_PRIMASK_MASK		(0x1f << GICH_VMCR_PRIMASK_SHIFT)
 #define GICH_VMCR_BINPOINT_SHIFT	21
......
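A quick way to see that nothing is lost in the GICH_VMCR split above: the retired catch-all GICH_VMCR_CTRL_MASK of 0x21f is exactly the union of the new per-field masks, i.e. bits 0-4 (EnableGrp0/1, AckCtl, FIQEn, CBPR) plus bit 9 (EOImode). A compile-time check one could write against the new definitions (illustrative only, not part of the patch):

	_Static_assert((GICH_VMCR_ENABLE_GRP0_MASK | GICH_VMCR_ENABLE_GRP1_MASK |
			GICH_VMCR_ACK_CTL_MASK | GICH_VMCR_FIQ_EN_MASK |
			GICH_VMCR_CBPR_MASK | GICH_VMCR_EOI_MODE_MASK) == 0x21f,
		       "new GICH_VMCR field masks cover the old CTRL mask");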
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
-#define vtr_to_nr_pri_bits(v)		(((u32)(v) >> 29) + 1)
+#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
@@ -135,13 +135,13 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	if (used_lrs) {
 		int i;
-		u32 nr_pri_bits;
+		u32 nr_pre_bits;
 		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 		write_gicreg(0, ICH_HCR_EL2);
 		val = read_gicreg(ICH_VTR_EL2);
-		nr_pri_bits = vtr_to_nr_pri_bits(val);
+		nr_pre_bits = vtr_to_nr_pre_bits(val);
 		for (i = 0; i < used_lrs; i++) {
 			if (cpu_if->vgic_elrsr & (1 << i))
@@ -152,7 +152,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 				__gic_v3_set_lr(0, i);
 		}
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
 			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
@@ -162,7 +162,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
 		}
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
 			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
@@ -198,7 +198,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	u64 val;
-	u32 nr_pri_bits;
+	u32 nr_pre_bits;
 	int i;
 	/*
@@ -217,12 +217,12 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	}
 	val = read_gicreg(ICH_VTR_EL2);
-	nr_pri_bits = vtr_to_nr_pri_bits(val);
+	nr_pre_bits = vtr_to_nr_pre_bits(val);
 	if (used_lrs) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
 			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
@@ -232,7 +232,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
 		}
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
 			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
......
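The rename above is not cosmetic: ICH_VTR_EL2 carries the priority-bits field in bits [31:29] and the preemption-bits field in bits [28:26], and it is the preemption bits that determine how many ICH_AP0Rn/ICH_AP1Rn registers exist. A hypothetical register value makes the difference visible (values chosen purely for illustration):

	/* e.g. PRIbits encodes 8 priority bits, PREbits encodes 5 preemption bits */
	u32 vtr = (7u << 29) | (4u << 26);

	u32 old_bits = ((u32)vtr >> 29) + 1;          /* vtr_to_nr_pri_bits(): 8 */
	u32 new_bits = (((u32)vtr >> 26) & 7) + 1;    /* vtr_to_nr_pre_bits(): 5 */

With the old macro the switch statements in the hunks above would try to access AP0R1..AP0R3, which are only implemented for six or more preemption bits; with the new macro only AP0R0/AP1R0 are touched in this example.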
@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
+		/*
+		 * Make sure the page table is still active, as another thread
+		 * could have possibly freed the page table, while we released
+		 * the lock.
+		 */
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
@@ -829,22 +836,22 @@ void stage2_unmap_vm(struct kvm *kvm)
  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
  * underlying level-2 and level-3 tables before freeing the actual level-1 table
  * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	if (kvm->arch.pgd == NULL)
-		return;
+	void *pgd = NULL;
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	if (kvm->arch.pgd) {
+		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+		pgd = READ_ONCE(kvm->arch.pgd);
+		kvm->arch.pgd = NULL;
+	}
 	spin_unlock(&kvm->mmu_lock);
 	/* Free the HW pgd, one page at a time */
-	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
-	kvm->arch.pgd = NULL;
+	if (pgd)
+		free_pages_exact(pgd, S2_PGD_SIZE);
 }
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -872,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
@@ -1170,11 +1180,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		 * large. Otherwise, we may see kernel panics with
 		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
 		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
-		 * will also starve other vCPUs.
+		 * will also starve other vCPUs. We have to also make sure
+		 * that the page tables are not freed while we released
+		 * the lock.
 		 */
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-			cond_resched_lock(&kvm->mmu_lock);
+		cond_resched_lock(&kvm->mmu_lock);
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (stage2_pgd_present(*pgd))
 			stage2_wp_puds(pgd, addr, next);
......
@@ -242,8 +242,11 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * If we are creating a VCPU with a GICv3 we must also register the
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
-	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		mutex_lock(&vcpu->kvm->lock);
 		ret = vgic_register_redist_iodev(vcpu);
+		mutex_unlock(&vcpu->kvm->lock);
+	}
 	return ret;
 }
......
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
......
@@ -590,7 +590,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	if (!vgic_v3_check_base(kvm))
 		return -EINVAL;
-	rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2;
+	rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
 	sgi_base = rd_base + SZ_64K;
 	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
@@ -618,11 +618,15 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	mutex_lock(&kvm->slots_lock);
 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
 				      SZ_64K, &sgi_dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret)
+	if (ret) {
 		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
 					  &rd_dev->dev);
+		goto out;
+	}
+	vgic->vgic_redist_free_offset += 2 * SZ_64K;
+out:
+	mutex_unlock(&kvm->slots_lock);
 	return ret;
 }
@@ -648,10 +652,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 		if (ret) {
 			/* The current c failed, so we start with the previous one. */
+			mutex_lock(&kvm->slots_lock);
 			for (c--; c >= 0; c--) {
 				vcpu = kvm_get_vcpu(kvm, c);
 				vgic_unregister_redist_iodev(vcpu);
 			}
+			mutex_unlock(&kvm->slots_lock);
 		}
 	return ret;
......
@@ -149,6 +149,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (irq->hw) {
 		val |= GICH_LR_HW;
 		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq_is_pending(irq))
+			val &= ~GICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= GICH_LR_EOI;
@@ -170,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	u32 vmcr;
-	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+		GICH_VMCR_ENABLE_GRP0_MASK;
+	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+		GICH_VMCR_ENABLE_GRP1_MASK;
+	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+		GICH_VMCR_ACK_CTL_MASK;
+	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+		GICH_VMCR_FIQ_EN_MASK;
+	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+		GICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+		GICH_VMCR_EOI_MODE_MASK;
 	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
 		GICH_VMCR_ALIAS_BINPOINT_MASK;
 	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -188,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	vmcr = cpu_if->vgic_vmcr;
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+			GICH_VMCR_ENABLE_GRP0_SHIFT;
+	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+			GICH_VMCR_ENABLE_GRP1_SHIFT;
+	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+			GICH_VMCR_ACK_CTL_SHIFT;
+	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+			GICH_VMCR_FIQ_EN_SHIFT;
+	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+			GICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+			GICH_VMCR_EOI_MODE_SHIFT;
 	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
 			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
 	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
......
@@ -127,6 +127,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (irq->hw) {
 		val |= ICH_LR_HW;
 		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq_is_pending(irq))
+			val &= ~ICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= ICH_LR_EOI;
@@ -152,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcr = ICH_VMCR_FIQ_EN_MASK;
+	}
+	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -173,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 	vmcr = cpu_if->vgic_vmcr;
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-			ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+			ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+			ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcrp->fiqen = 1;
+		vmcrp->ackctl = 0;
+	}
+	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
......
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-	u32	ctlr;
+	u32	grpen0;
+	u32	grpen1;
+	u32	ackctl;
+	u32	fiqen;
+	u32	cbpr;
+	u32	eoim;
 	u32	abpr;
 	u32	bpr;
 	u32	pmr;  /* Priority mask field in the GICC_PMR and
 		       * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32	grpen0;
-	u32	grpen1;
 };
 struct vgic_reg_attr {
......