Commit 71783e09 authored by Paolo Bonzini


Merge tag 'kvmarm-for-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

KVM/arm updates for Linux v5.1

- A number of pre-nested code reworks
- Direct physical timer assignment on VHE systems
- kvm_call_hyp type safety enforcement
- Set/Way cache sanitisation for 32bit guests
- Build system cleanups
- A bunch of janitorial fixes
parents 8f060f53 c88b0936
...@@ -8304,29 +8304,25 @@ S: Maintained ...@@ -8304,29 +8304,25 @@ S: Maintained
F: arch/x86/include/asm/svm.h F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm) KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
M: Christoffer Dall <christoffer.dall@arm.com> M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com> M: Marc Zyngier <marc.zyngier@arm.com>
R: James Morse <james.morse@arm.com>
R: Julien Thierry <julien.thierry@arm.com>
R: Suzuki K Poulose <suzuki.poulose@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm W: http://systems.cs.columbia.edu/projects/kvm-arm
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
S: Supported S: Maintained
F: arch/arm/include/uapi/asm/kvm* F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm* F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/ F: arch/arm/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
S: Maintained
F: arch/arm64/include/uapi/asm/kvm* F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/include/asm/kvm* F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/ F: arch/arm64/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: James Hogan <jhogan@kernel.org> M: James Hogan <jhogan@kernel.org>
......
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1) #define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2) #define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3) #define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5) #define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7) #define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x) #define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
...@@ -151,7 +151,7 @@ CPUIF_MAP(ICH_HCR, ICH_HCR_EL2) ...@@ -151,7 +151,7 @@ CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
CPUIF_MAP(ICH_VTR, ICH_VTR_EL2) CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
CPUIF_MAP(ICH_MISR, ICH_MISR_EL2) CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
CPUIF_MAP(ICH_EISR, ICH_EISR_EL2) CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2) CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2) CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2) CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2) CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
......
...@@ -265,6 +265,14 @@ static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) ...@@ -265,6 +265,14 @@ static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
} }
} }
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
return kvm_vcpu_dabt_iswrite(vcpu);
}
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{ {
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h> #include <asm/kvm_mmio.h>
#include <asm/fpstate.h> #include <asm/fpstate.h>
#include <asm/smp_plat.h>
#include <kvm/arm_arch_timer.h> #include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED #define __KVM_HAVE_ARCH_INTC_INITIALIZED
...@@ -56,10 +57,13 @@ int __attribute_const__ kvm_target_cpu(void); ...@@ -56,10 +57,13 @@ int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu); void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_arch { struct kvm_vmid {
/* VTTBR value associated with below pgd and vmid */ /* The VMID generation used for the virt. memory system */
u64 vttbr; u64 vmid_gen;
u32 vmid;
};
struct kvm_arch {
/* The last vcpu id that ran on each physical CPU */ /* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran; int __percpu *last_vcpu_ran;
...@@ -69,11 +73,11 @@ struct kvm_arch { ...@@ -69,11 +73,11 @@ struct kvm_arch {
*/ */
/* The VMID generation used for the virt. memory system */ /* The VMID generation used for the virt. memory system */
u64 vmid_gen; struct kvm_vmid vmid;
u32 vmid;
/* Stage-2 page table */ /* Stage-2 page table */
pgd_t *pgd; pgd_t *pgd;
phys_addr_t pgd_phys;
/* Interrupt controller */ /* Interrupt controller */
struct vgic_dist vgic; struct vgic_dist vgic;
...@@ -147,6 +151,13 @@ struct kvm_cpu_context { ...@@ -147,6 +151,13 @@ struct kvm_cpu_context {
typedef struct kvm_cpu_context kvm_cpu_context_t; typedef struct kvm_cpu_context kvm_cpu_context_t;
static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
int cpu)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
}
struct kvm_vcpu_arch { struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt; struct kvm_cpu_context ctxt;
...@@ -214,7 +225,35 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); ...@@ -214,7 +225,35 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_call_hyp(void *hypfn, ...);
unsigned long __kvm_call_hyp(void *hypfn, ...);
/*
* The has_vhe() part doesn't get emitted, but is used for type-checking.
*/
#define kvm_call_hyp(f, ...) \
do { \
if (has_vhe()) { \
f(__VA_ARGS__); \
} else { \
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
} \
} while(0)
#define kvm_call_hyp_ret(f, ...) \
({ \
typeof(f(__VA_ARGS__)) ret; \
\
if (has_vhe()) { \
ret = f(__VA_ARGS__); \
} else { \
ret = __kvm_call_hyp(kvm_ksym_ref(f), \
##__VA_ARGS__); \
} \
\
ret; \
})
void force_vm_exit(const cpumask_t *mask); void force_vm_exit(const cpumask_t *mask);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events); struct kvm_vcpu_events *events);
...@@ -265,7 +304,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, ...@@ -265,7 +304,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
* compliant with the PCS!). * compliant with the PCS!).
*/ */
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
} }
static inline void __cpu_init_stage2(void) static inline void __cpu_init_stage2(void)
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#define TTBR1 __ACCESS_CP15_64(1, c2) #define TTBR1 __ACCESS_CP15_64(1, c2)
#define VTTBR __ACCESS_CP15_64(6, c2) #define VTTBR __ACCESS_CP15_64(6, c2)
#define PAR __ACCESS_CP15_64(0, c7) #define PAR __ACCESS_CP15_64(0, c7)
#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
#define CNTV_CVAL __ACCESS_CP15_64(3, c14) #define CNTV_CVAL __ACCESS_CP15_64(3, c14)
#define CNTVOFF __ACCESS_CP15_64(4, c14) #define CNTVOFF __ACCESS_CP15_64(4, c14)
...@@ -85,6 +86,7 @@ ...@@ -85,6 +86,7 @@
#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) #define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) #define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) #define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) #define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) #define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
...@@ -94,6 +96,8 @@ ...@@ -94,6 +96,8 @@
#define read_sysreg_el0(r) read_sysreg(r##_el0) #define read_sysreg_el0(r) read_sysreg(r##_el0)
#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0) #define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
#define cntp_ctl_el0 CNTP_CTL
#define cntp_cval_el0 CNTP_CVAL
#define cntv_ctl_el0 CNTV_CTL #define cntv_ctl_el0 CNTV_CTL
#define cntv_cval_el0 CNTV_CVAL #define cntv_cval_el0 CNTV_CVAL
#define cntvoff_el2 CNTVOFF #define cntvoff_el2 CNTVOFF
......
...@@ -421,9 +421,14 @@ static inline int hyp_map_aux_data(void) ...@@ -421,9 +421,14 @@ static inline int hyp_map_aux_data(void)
static inline void kvm_set_ipa_limit(void) {} static inline void kvm_set_ipa_limit(void) {}
static inline bool kvm_cpu_has_cnp(void) static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{ {
return false; struct kvm_vmid *vmid = &kvm->arch.vmid;
u64 vmid_field, baddr;
baddr = kvm->arch.pgd_phys;
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
return kvm_phys_to_vttbr(baddr) | vmid_field;
} }
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -8,9 +8,8 @@ ifeq ($(plus_virt),+virt) ...@@ -8,9 +8,8 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1 plus_virt_def := -DREQUIRES_VIRT=1
endif endif
ccflags-y += -Iarch/arm/kvm -Ivirt/kvm/arm/vgic ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
CFLAGS_arm.o := -I. $(plus_virt_def) CFLAGS_arm.o := $(plus_virt_def)
CFLAGS_mmu.o := -I.
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
......
...@@ -293,15 +293,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu, ...@@ -293,15 +293,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu,
const struct coproc_params *p, const struct coproc_params *p,
const struct coproc_reg *r) const struct coproc_reg *r)
{ {
u64 now = kvm_phys_timer_read(); u32 val;
u64 val;
if (p->is_write) { if (p->is_write) {
val = *vcpu_reg(vcpu, p->Rt1); val = *vcpu_reg(vcpu, p->Rt1);
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now); kvm_arm_timer_write_sysreg(vcpu,
TIMER_PTIMER, TIMER_REG_TVAL, val);
} else { } else {
val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); val = kvm_arm_timer_read_sysreg(vcpu,
*vcpu_reg(vcpu, p->Rt1) = val - now; TIMER_PTIMER, TIMER_REG_TVAL);
*vcpu_reg(vcpu, p->Rt1) = val;
} }
return true; return true;
...@@ -315,9 +316,11 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu, ...@@ -315,9 +316,11 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
if (p->is_write) { if (p->is_write) {
val = *vcpu_reg(vcpu, p->Rt1); val = *vcpu_reg(vcpu, p->Rt1);
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val); kvm_arm_timer_write_sysreg(vcpu,
TIMER_PTIMER, TIMER_REG_CTL, val);
} else { } else {
val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL); val = kvm_arm_timer_read_sysreg(vcpu,
TIMER_PTIMER, TIMER_REG_CTL);
*vcpu_reg(vcpu, p->Rt1) = val; *vcpu_reg(vcpu, p->Rt1) = val;
} }
...@@ -333,9 +336,11 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu, ...@@ -333,9 +336,11 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
if (p->is_write) { if (p->is_write) {
val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32; val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
val |= *vcpu_reg(vcpu, p->Rt1); val |= *vcpu_reg(vcpu, p->Rt1);
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val); kvm_arm_timer_write_sysreg(vcpu,
TIMER_PTIMER, TIMER_REG_CVAL, val);
} else { } else {
val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); val = kvm_arm_timer_read_sysreg(vcpu,
TIMER_PTIMER, TIMER_REG_CVAL);
*vcpu_reg(vcpu, p->Rt1) = val; *vcpu_reg(vcpu, p->Rt1) = val;
*vcpu_reg(vcpu, p->Rt2) = val >> 32; *vcpu_reg(vcpu, p->Rt2) = val >> 32;
} }
......
...@@ -27,7 +27,6 @@ static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) ...@@ -27,7 +27,6 @@ static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
{ {
ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR);
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR); ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR); ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR); ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
......
...@@ -176,7 +176,7 @@ THUMB( orr lr, lr, #PSR_T_BIT ) ...@@ -176,7 +176,7 @@ THUMB( orr lr, lr, #PSR_T_BIT )
msr spsr_cxsf, lr msr spsr_cxsf, lr
ldr lr, =panic ldr lr, =panic
msr ELR_hyp, lr msr ELR_hyp, lr
ldr lr, =kvm_call_hyp ldr lr, =__kvm_call_hyp
clrex clrex
eret eret
ENDPROC(__hyp_do_panic) ENDPROC(__hyp_do_panic)
......
...@@ -77,7 +77,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) ...@@ -77,7 +77,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{ {
struct kvm *kvm = kern_hyp_va(vcpu->kvm); struct kvm *kvm = kern_hyp_va(vcpu->kvm);
write_sysreg(kvm->arch.vttbr, VTTBR); write_sysreg(kvm_get_vttbr(kvm), VTTBR);
write_sysreg(vcpu->arch.midr, VPIDR); write_sysreg(vcpu->arch.midr, VPIDR);
} }
......
...@@ -41,7 +41,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) ...@@ -41,7 +41,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */ /* Switch to requested VMID */
kvm = kern_hyp_va(kvm); kvm = kern_hyp_va(kvm);
write_sysreg(kvm->arch.vttbr, VTTBR); write_sysreg(kvm_get_vttbr(kvm), VTTBR);
isb(); isb();
write_sysreg(0, TLBIALLIS); write_sysreg(0, TLBIALLIS);
...@@ -61,7 +61,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) ...@@ -61,7 +61,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
/* Switch to requested VMID */ /* Switch to requested VMID */
write_sysreg(kvm->arch.vttbr, VTTBR); write_sysreg(kvm_get_vttbr(kvm), VTTBR);
isb(); isb();
write_sysreg(0, TLBIALL); write_sysreg(0, TLBIALL);
......
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
* r12: caller save * r12: caller save
* rest: callee save * rest: callee save
*/ */
ENTRY(kvm_call_hyp) ENTRY(__kvm_call_hyp)
hvc #0 hvc #0
bx lr bx lr
ENDPROC(kvm_call_hyp) ENDPROC(__kvm_call_hyp)
...@@ -77,6 +77,10 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) ...@@ -77,6 +77,10 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
*/ */
if (!vcpu_el1_is_32bit(vcpu)) if (!vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID3; vcpu->arch.hcr_el2 |= HCR_TID3;
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID2;
} }
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
...@@ -331,6 +335,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) ...@@ -331,6 +335,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
return ESR_ELx_SYS64_ISS_RT(esr); return ESR_ELx_SYS64_ISS_RT(esr);
} }
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
return kvm_vcpu_dabt_iswrite(vcpu);
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{ {
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <asm/kvm.h> #include <asm/kvm.h>
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h> #include <asm/kvm_mmio.h>
#include <asm/smp_plat.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED #define __KVM_HAVE_ARCH_INTC_INITIALIZED
...@@ -56,16 +57,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu); ...@@ -56,16 +57,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
struct kvm_arch { struct kvm_vmid {
/* The VMID generation used for the virt. memory system */ /* The VMID generation used for the virt. memory system */
u64 vmid_gen; u64 vmid_gen;
u32 vmid; u32 vmid;
};
struct kvm_arch {
struct kvm_vmid vmid;
/* stage2 entry level table */ /* stage2 entry level table */
pgd_t *pgd; pgd_t *pgd;
phys_addr_t pgd_phys;
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
/* VTCR_EL2 value for this VM */ /* VTCR_EL2 value for this VM */
u64 vtcr; u64 vtcr;
...@@ -370,7 +374,36 @@ void kvm_arm_halt_guest(struct kvm *kvm); ...@@ -370,7 +374,36 @@ void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm);
u64 __kvm_call_hyp(void *hypfn, ...); u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
/*
* The couple of isb() below are there to guarantee the same behaviour
* on VHE as on !VHE, where the eret to EL1 acts as a context
* synchronization event.
*/
#define kvm_call_hyp(f, ...) \
do { \
if (has_vhe()) { \
f(__VA_ARGS__); \
isb(); \
} else { \
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
} \
} while(0)
#define kvm_call_hyp_ret(f, ...) \
({ \
typeof(f(__VA_ARGS__)) ret; \
\
if (has_vhe()) { \
ret = f(__VA_ARGS__); \
isb(); \
} else { \
ret = __kvm_call_hyp(kvm_ksym_ref(f), \
##__VA_ARGS__); \
} \
\
ret; \
})
void force_vm_exit(const cpumask_t *mask); void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
...@@ -389,6 +422,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); ...@@ -389,6 +422,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
int cpu)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
}
void __kvm_enable_ssbs(void); void __kvm_enable_ssbs(void);
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
......
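As a usage sketch (the wrapper function here is hypothetical; the two call sites themselves appear elsewhere in this series), the rule of thumb after this rework is: kvm_call_hyp() when the hyp function's return value is not needed, kvm_call_hyp_ret() when it is. On VHE the function is simply called (followed by an isb() on arm64), on non-VHE it is routed through __kvm_call_hyp() and an HVC.

	/* Hypothetical wrapper around two real call sites from this series */
	static void example_load_hyp_state(struct kvm_vcpu *vcpu)
	{
		/* No return value wanted: plain kvm_call_hyp() */
		kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

		/* Return value wanted: kvm_call_hyp_ret() */
		__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
	}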
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace #define __hyp_text __section(.hyp.text) notrace
...@@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...); ...@@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...);
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{ {
write_sysreg(kvm->arch.vtcr, vtcr_el2); write_sysreg(kvm->arch.vtcr, vtcr_el2);
write_sysreg(kvm->arch.vttbr, vttbr_el2); write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
/* /*
* ARM erratum 1165522 requires the actual execution of the above * ARM erratum 1165522 requires the actual execution of the above
......
...@@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v) ...@@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
}) })
/* /*
* We currently only support a 40bit IPA. * We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
*/ */
#define KVM_PHYS_SHIFT (40) #define KVM_PHYS_SHIFT (40)
...@@ -591,9 +592,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm) ...@@ -591,9 +592,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)); return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
} }
static inline bool kvm_cpu_has_cnp(void) static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{ {
return system_supports_cnp(); struct kvm_vmid *vmid = &kvm->arch.vmid;
u64 vmid_field, baddr;
u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
baddr = kvm->arch.pgd_phys;
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
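Net effect: the VTTBR is no longer cached in struct kvm_arch but assembled on demand from pgd_phys, the current VMID and, where the CPU supports it, the CnP bit. A rough sketch of the value kvm_get_vttbr() composes, assuming the usual arm64 constants (VTTBR_VMID_SHIFT == 48, VTTBR_CNP_BIT == bit 0):

	/*
	 * Approximate layout of the composed VTTBR value:
	 *
	 *   [63:48] VMID   from kvm->arch.vmid.vmid
	 *   [47:1]  BADDR  kvm_phys_to_vttbr(kvm->arch.pgd_phys)
	 *   [0]     CnP    set only when system_supports_cnp() (arm64 only)
	 *
	 * __load_guest_stage2()/__activate_vm() write this straight into
	 * VTTBR(_EL2) on every guest entry, so no cached kvm->arch.vttbr
	 * is needed any more.
	 */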
...@@ -361,6 +361,7 @@ ...@@ -361,6 +361,7 @@
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) #define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
...@@ -392,6 +393,10 @@ ...@@ -392,6 +393,10 @@
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) #define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) #define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
#define __PMEV_op2(n) ((n) & 0x7) #define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n)) #define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
...@@ -426,7 +431,7 @@ ...@@ -426,7 +431,7 @@
#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) #define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) #define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) #define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) #define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) #define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x) #define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
......
...@@ -3,9 +3,7 @@ ...@@ -3,9 +3,7 @@
# Makefile for Kernel-based Virtual Machine module # Makefile for Kernel-based Virtual Machine module
# #
ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
KVM=../../../virt/kvm KVM=../../../virt/kvm
......
...@@ -76,7 +76,7 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) ...@@ -76,7 +76,7 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
void kvm_arm_init_debug(void) void kvm_arm_init_debug(void)
{ {
__this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2)); __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
} }
/** /**
......
...@@ -40,9 +40,6 @@ ...@@ -40,9 +40,6 @@
* arch/arm64/kernel/hyp_stub.S. * arch/arm64/kernel/hyp_stub.S.
*/ */
ENTRY(__kvm_call_hyp) ENTRY(__kvm_call_hyp)
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
hvc #0 hvc #0
ret ret
alternative_else_nop_endif
b __vhe_hyp_call
ENDPROC(__kvm_call_hyp) ENDPROC(__kvm_call_hyp)
...@@ -43,18 +43,6 @@ ...@@ -43,18 +43,6 @@
ldr lr, [sp], #16 ldr lr, [sp], #16
.endm .endm
ENTRY(__vhe_hyp_call)
do_el2_call
/*
* We used to rely on having an exception return to get
* an implicit isb. In the E2H case, we don't have it anymore.
* rather than changing all the leaf functions, just do it here
* before returning to the rest of the kernel.
*/
isb
ret
ENDPROC(__vhe_hyp_call)
el1_sync: // Guest trapped into EL2 el1_sync: // Guest trapped into EL2
mrs x0, esr_el2 mrs x0, esr_el2
......
...@@ -52,7 +52,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt) ...@@ -52,7 +52,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{ {
ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
......
...@@ -965,6 +965,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, ...@@ -965,6 +965,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true; return true;
} }
#define reg_to_encoding(x) \
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
...@@ -986,44 +990,38 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, ...@@ -986,44 +990,38 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
static bool access_cntp_tval(struct kvm_vcpu *vcpu, static bool access_arch_timer(struct kvm_vcpu *vcpu,
struct sys_reg_params *p, struct sys_reg_params *p,
const struct sys_reg_desc *r) const struct sys_reg_desc *r)
{ {
u64 now = kvm_phys_timer_read(); enum kvm_arch_timers tmr;
u64 cval; enum kvm_arch_timer_regs treg;
u64 reg = reg_to_encoding(r);
if (p->is_write) { switch (reg) {
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, case SYS_CNTP_TVAL_EL0:
p->regval + now); case SYS_AARCH32_CNTP_TVAL:
} else { tmr = TIMER_PTIMER;
cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); treg = TIMER_REG_TVAL;
p->regval = cval - now; break;
case SYS_CNTP_CTL_EL0:
case SYS_AARCH32_CNTP_CTL:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CTL;
break;
case SYS_CNTP_CVAL_EL0:
case SYS_AARCH32_CNTP_CVAL:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CVAL;
break;
default:
BUG();
} }
return true;
}
static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
else
p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
return true;
}
static bool access_cntp_cval(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) if (p->is_write)
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval); kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
else else
p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
return true; return true;
} }
...@@ -1148,6 +1146,64 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, ...@@ -1148,6 +1146,64 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return __set_id_reg(rd, uaddr, true); return __set_id_reg(rd, uaddr, true);
} }
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return write_to_read_only(vcpu, p, r);
p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
return true;
}
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return write_to_read_only(vcpu, p, r);
p->regval = read_sysreg(clidr_el1);
return true;
}
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
else
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
return true;
}
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
u32 csselr;
if (p->is_write)
return write_to_read_only(vcpu, p, r);
csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
p->regval = get_ccsidr(csselr);
/*
* Guests should not be doing cache operations by set/way at all, and
* for this reason, we trap them and attempt to infer the intent, so
* that we can flush the entire guest's address space at the appropriate
* time.
* To prevent this trapping from causing performance problems, let's
* expose the geometry of all data and unified caches (which are
* guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
* [If guests should attempt to infer aliasing properties from the
* geometry (which is not permitted by the architecture), they would
* only do so for virtually indexed caches.]
*/
if (!(csselr & 1)) // data or unified cache
p->regval &= ~GENMASK(27, 3);
return true;
}
/* sys_reg_desc initialiser for known cpufeature ID registers */ /* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \ #define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \ SYS_DESC(SYS_##name), \
...@@ -1365,7 +1421,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { ...@@ -1365,7 +1421,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0}, { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 }, { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
...@@ -1388,9 +1447,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { ...@@ -1388,9 +1447,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval }, { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl }, { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval }, { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
/* PMEVCNTRn_EL0 */ /* PMEVCNTRn_EL0 */
PMU_PMEVCNTR_EL0(0), PMU_PMEVCNTR_EL0(0),
...@@ -1464,7 +1523,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ...@@ -1464,7 +1523,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 }, { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 }, { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
}; };
static bool trap_dbgidr(struct kvm_vcpu *vcpu, static bool trap_dbgidr(struct kvm_vcpu *vcpu,
...@@ -1665,6 +1724,7 @@ static const struct sys_reg_desc cp14_64_regs[] = { ...@@ -1665,6 +1724,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register). * register).
*/ */
static const struct sys_reg_desc cp15_regs[] = { static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
...@@ -1711,10 +1771,9 @@ static const struct sys_reg_desc cp15_regs[] = { ...@@ -1711,10 +1771,9 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
/* CNTP_TVAL */ /* Arch Timers */
{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval }, { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
/* CNTP_CTL */ { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
/* PMEVCNTRn */ /* PMEVCNTRn */
PMU_PMEVCNTR(0), PMU_PMEVCNTR(0),
...@@ -1782,6 +1841,10 @@ static const struct sys_reg_desc cp15_regs[] = { ...@@ -1782,6 +1841,10 @@ static const struct sys_reg_desc cp15_regs[] = {
PMU_PMEVTYPER(30), PMU_PMEVTYPER(30),
/* PMCCFILTR */ /* PMCCFILTR */
{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper }, { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
}; };
static const struct sys_reg_desc cp15_64_regs[] = { static const struct sys_reg_desc cp15_64_regs[] = {
...@@ -1791,7 +1854,7 @@ static const struct sys_reg_desc cp15_64_regs[] = { ...@@ -1791,7 +1854,7 @@ static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval }, { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
}; };
/* Target specific emulation tables */ /* Target specific emulation tables */
...@@ -1820,30 +1883,19 @@ static const struct sys_reg_desc *get_target_table(unsigned target, ...@@ -1820,30 +1883,19 @@ static const struct sys_reg_desc *get_target_table(unsigned target,
} }
} }
#define reg_to_match_value(x) \
({ \
unsigned long val; \
val = (x)->Op0 << 14; \
val |= (x)->Op1 << 11; \
val |= (x)->CRn << 7; \
val |= (x)->CRm << 3; \
val |= (x)->Op2; \
val; \
})
static int match_sys_reg(const void *key, const void *elt) static int match_sys_reg(const void *key, const void *elt)
{ {
const unsigned long pval = (unsigned long)key; const unsigned long pval = (unsigned long)key;
const struct sys_reg_desc *r = elt; const struct sys_reg_desc *r = elt;
return pval - reg_to_match_value(r); return pval - reg_to_encoding(r);
} }
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
const struct sys_reg_desc table[], const struct sys_reg_desc table[],
unsigned int num) unsigned int num)
{ {
unsigned long pval = reg_to_match_value(params); unsigned long pval = reg_to_encoding(params);
return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
} }
...@@ -2206,11 +2258,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, ...@@ -2206,11 +2258,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
} }
FUNCTION_INVARIANT(midr_el1) FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1) FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1) FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1) FUNCTION_INVARIANT(aidr_el1)
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}
/* ->val is filled in by kvm_sys_reg_table_init() */ /* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = { static struct sys_reg_desc invariant_sys_regs[] = {
{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 }, { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
......
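One detail worth spelling out in the cache-trapping code above: the p->regval &= ~GENMASK(27, 3) in access_ccsidr() clears the NumSets and Associativity fields of CCSIDR_EL1 while preserving LineSize, so a data or unified cache is reported as one set and one way. A worked example, assuming the pre-FEAT_CCIDX register layout (NumSets-1 in bits [27:13], Associativity-1 in bits [12:3], LineSize in bits [2:0]):

	/*
	 * host CCSIDR_EL1 = 0x001fe01a  -> 256 sets, 4 ways, 64-byte lines
	 * & ~GENMASK(27, 3)             -> 0x00000002: 1 set, 1 way, 64-byte lines
	 *
	 * Only instruction caches (CSSELR bit 0 set) keep their real geometry,
	 * matching the comment above about virtually indexed caches.
	 */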
...@@ -1206,6 +1206,13 @@ static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void) ...@@ -1206,6 +1206,13 @@ static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
return ARCH_TIMER_PHYS_SECURE_PPI; return ARCH_TIMER_PHYS_SECURE_PPI;
} }
static void __init arch_timer_populate_kvm_info(void)
{
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
if (is_kernel_in_hyp_mode())
arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}
static int __init arch_timer_of_init(struct device_node *np) static int __init arch_timer_of_init(struct device_node *np)
{ {
int i, ret; int i, ret;
...@@ -1220,7 +1227,7 @@ static int __init arch_timer_of_init(struct device_node *np) ...@@ -1220,7 +1227,7 @@ static int __init arch_timer_of_init(struct device_node *np)
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
arch_timer_ppi[i] = irq_of_parse_and_map(np, i); arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; arch_timer_populate_kvm_info();
rate = arch_timer_get_cntfrq(); rate = arch_timer_get_cntfrq();
arch_timer_of_configure_rate(rate, np); arch_timer_of_configure_rate(rate, np);
...@@ -1550,7 +1557,7 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table) ...@@ -1550,7 +1557,7 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
arch_timer_ppi[ARCH_TIMER_HYP_PPI] = arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; arch_timer_populate_kvm_info();
/* /*
* When probing via ACPI, we have no mechanism to override the sysreg * When probing via ACPI, we have no mechanism to override the sysreg
......
...@@ -74,6 +74,7 @@ enum arch_timer_spi_nr { ...@@ -74,6 +74,7 @@ enum arch_timer_spi_nr {
struct arch_timer_kvm_info { struct arch_timer_kvm_info {
struct timecounter timecounter; struct timecounter timecounter;
int virtual_irq; int virtual_irq;
int physical_irq;
}; };
struct arch_timer_mem_frame { struct arch_timer_mem_frame {
......
...@@ -22,7 +22,22 @@ ...@@ -22,7 +22,22 @@
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
enum kvm_arch_timers {
TIMER_PTIMER,
TIMER_VTIMER,
NR_KVM_TIMERS
};
enum kvm_arch_timer_regs {
TIMER_REG_CNT,
TIMER_REG_CVAL,
TIMER_REG_TVAL,
TIMER_REG_CTL,
};
struct arch_timer_context { struct arch_timer_context {
struct kvm_vcpu *vcpu;
/* Registers: control register, timer value */ /* Registers: control register, timer value */
u32 cnt_ctl; u32 cnt_ctl;
u64 cnt_cval; u64 cnt_cval;
...@@ -30,30 +45,36 @@ struct arch_timer_context { ...@@ -30,30 +45,36 @@ struct arch_timer_context {
/* Timer IRQ */ /* Timer IRQ */
struct kvm_irq_level irq; struct kvm_irq_level irq;
/* Virtual offset */
u64 cntvoff;
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
/* /*
* We have multiple paths which can save/restore the timer state * We have multiple paths which can save/restore the timer state onto
* onto the hardware, so we need some way of keeping track of * the hardware, so we need some way of keeping track of where the
* where the latest state is. * latest state is.
*
* loaded == true: State is loaded on the hardware registers.
* loaded == false: State is stored in memory.
*/ */
bool loaded; bool loaded;
/* Virtual offset */ /* Duplicated state from arch_timer.c for convenience */
u64 cntvoff; u32 host_timer_irq;
u32 host_timer_irq_flags;
};
struct timer_map {
struct arch_timer_context *direct_vtimer;
struct arch_timer_context *direct_ptimer;
struct arch_timer_context *emul_ptimer;
}; };
struct arch_timer_cpu { struct arch_timer_cpu {
struct arch_timer_context vtimer; struct arch_timer_context timers[NR_KVM_TIMERS];
struct arch_timer_context ptimer;
/* Background timer used when the guest is not running */ /* Background timer used when the guest is not running */
struct hrtimer bg_timer; struct hrtimer bg_timer;
/* Physical timer emulation */
struct hrtimer phys_timer;
/* Is the timer enabled */ /* Is the timer enabled */
bool enabled; bool enabled;
}; };
...@@ -76,9 +97,6 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); ...@@ -76,9 +97,6 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu); bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
void kvm_timer_schedule(struct kvm_vcpu *vcpu);
void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
u64 kvm_phys_timer_read(void); u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
...@@ -88,7 +106,19 @@ void kvm_timer_init_vhe(void); ...@@ -88,7 +106,19 @@ void kvm_timer_init_vhe(void);
bool kvm_arch_timer_get_input_level(int vintid); bool kvm_arch_timer_get_input_level(int vintid);
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) #define vcpu_timer(v) (&(v)->arch.timer_cpu)
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) #define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg);
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg,
u64 val);
#endif #endif
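A hypothetical illustration of how these pieces are meant to combine (the real mapping is built by the timer code in this series and is visible via the kvm_get_timer_map tracepoint below): on VHE hosts both guest timers can sit directly on hardware, while on non-VHE hosts the physical timer falls back to hrtimer emulation.

	struct timer_map map;

	if (has_vhe()) {
		map.direct_vtimer = vcpu_vtimer(vcpu);  /* hardware EL1 virtual timer */
		map.direct_ptimer = vcpu_ptimer(vcpu);  /* hardware EL1 physical timer */
		map.emul_ptimer   = NULL;
	} else {
		map.direct_vtimer = vcpu_vtimer(vcpu);
		map.direct_ptimer = NULL;
		map.emul_ptimer   = vcpu_ptimer(vcpu);  /* emulated with a hrtimer */
	}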
...@@ -65,7 +65,6 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); ...@@ -65,7 +65,6 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
/* The VMID used in the VTTBR */ /* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid; static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock); static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present; static bool vgic_present;
...@@ -142,7 +141,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ...@@ -142,7 +141,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_vgic_early_init(kvm); kvm_vgic_early_init(kvm);
/* Mark the initial VMID generation invalid */ /* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0; kvm->arch.vmid.vmid_gen = 0;
/* The maximum number of VCPUs is limited by the host's GIC model */ /* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = vgic_present ? kvm->arch.max_vcpus = vgic_present ?
...@@ -336,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) ...@@ -336,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{ {
kvm_timer_schedule(vcpu);
kvm_vgic_v4_enable_doorbell(vcpu); kvm_vgic_v4_enable_doorbell(vcpu);
} }
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{ {
kvm_timer_unschedule(vcpu);
kvm_vgic_v4_disable_doorbell(vcpu); kvm_vgic_v4_disable_doorbell(vcpu);
} }
...@@ -472,37 +469,31 @@ void force_vm_exit(const cpumask_t *mask) ...@@ -472,37 +469,31 @@ void force_vm_exit(const cpumask_t *mask)
/** /**
* need_new_vmid_gen - check that the VMID is still valid * need_new_vmid_gen - check that the VMID is still valid
* @kvm: The VM's VMID to check * @vmid: The VMID to check
* *
* return true if there is a new generation of VMIDs being used * return true if there is a new generation of VMIDs being used
* *
* The hardware supports only 256 values with the value zero reserved for the * The hardware supports a limited set of values with the value zero reserved
* host, so we check if an assigned value belongs to a previous generation, * for the host, so we check if an assigned value belongs to a previous
* which requires us to assign a new value. If we're the first to use a * generation, which requires us to assign a new value. If we're the
* VMID for the new generation, we must flush necessary caches and TLBs on all * first to use a VMID for the new generation, we must flush necessary caches
* CPUs. * and TLBs on all CPUs.
*/ */
static bool need_new_vmid_gen(struct kvm *kvm) static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{ {
u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen); u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen); return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
} }
/** /**
* update_vttbr - Update the VTTBR with a valid VMID before the guest runs * update_vmid - Update the vmid with a valid VMID for the current generation
* @kvm The guest that we are about to run * @kvm: The guest that struct vmid belongs to
* * @vmid: The stage-2 VMID information struct
* Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
* VM has a valid VMID, otherwise assigns a new one and flushes corresponding
* caches and TLBs.
*/ */
static void update_vttbr(struct kvm *kvm) static void update_vmid(struct kvm_vmid *vmid)
{ {
phys_addr_t pgd_phys; if (!need_new_vmid_gen(vmid))
u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
if (!need_new_vmid_gen(kvm))
return; return;
spin_lock(&kvm_vmid_lock); spin_lock(&kvm_vmid_lock);
...@@ -512,7 +503,7 @@ static void update_vttbr(struct kvm *kvm) ...@@ -512,7 +503,7 @@ static void update_vttbr(struct kvm *kvm)
* already allocated a valid vmid for this vm, then this vcpu should * already allocated a valid vmid for this vm, then this vcpu should
* use the same vmid. * use the same vmid.
*/ */
if (!need_new_vmid_gen(kvm)) { if (!need_new_vmid_gen(vmid)) {
spin_unlock(&kvm_vmid_lock); spin_unlock(&kvm_vmid_lock);
return; return;
} }
...@@ -536,18 +527,12 @@ static void update_vttbr(struct kvm *kvm) ...@@ -536,18 +527,12 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context); kvm_call_hyp(__kvm_flush_vm_context);
} }
kvm->arch.vmid = kvm_next_vmid; vmid->vmid = kvm_next_vmid;
kvm_next_vmid++; kvm_next_vmid++;
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1; kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
smp_wmb(); smp_wmb();
WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen)); WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
spin_unlock(&kvm_vmid_lock); spin_unlock(&kvm_vmid_lock);
} }
...@@ -690,7 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -690,7 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/ */
cond_resched(); cond_resched();
update_vttbr(vcpu->kvm); update_vmid(&vcpu->kvm->arch.vmid);
check_vcpu_requests(vcpu); check_vcpu_requests(vcpu);
...@@ -739,7 +724,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -739,7 +724,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/ */
smp_store_mb(vcpu->mode, IN_GUEST_MODE); smp_store_mb(vcpu->mode, IN_GUEST_MODE);
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
kvm_request_pending(vcpu)) { kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->mode = OUTSIDE_GUEST_MODE;
isb(); /* Ensure work in x_flush_hwstate is committed */ isb(); /* Ensure work in x_flush_hwstate is committed */
...@@ -765,7 +750,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -765,7 +750,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_vcpu_run_vhe(vcpu); ret = kvm_vcpu_run_vhe(vcpu);
kvm_arm_vhe_guest_exit(); kvm_arm_vhe_guest_exit();
} else { } else {
ret = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu); ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
} }
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->mode = OUTSIDE_GUEST_MODE;
...@@ -1417,10 +1402,6 @@ static inline void hyp_cpu_pm_exit(void) ...@@ -1417,10 +1402,6 @@ static inline void hyp_cpu_pm_exit(void)
static int init_common_resources(void) static int init_common_resources(void)
{ {
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
kvm_set_ipa_limit(); kvm_set_ipa_limit();
return 0; return 0;
...@@ -1561,6 +1542,7 @@ static int init_hyp_mode(void) ...@@ -1561,6 +1542,7 @@ static int init_hyp_mode(void)
kvm_cpu_context_t *cpu_ctxt; kvm_cpu_context_t *cpu_ctxt;
cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu); cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
kvm_init_host_cpu_context(cpu_ctxt, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP); err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) { if (err) {
...@@ -1571,7 +1553,7 @@ static int init_hyp_mode(void) ...@@ -1571,7 +1553,7 @@ static int init_hyp_mode(void)
err = hyp_map_aux_data(); err = hyp_map_aux_data();
if (err) if (err)
kvm_err("Cannot map host auxilary data: %d\n", err); kvm_err("Cannot map host auxiliary data: %d\n", err);
return 0; return 0;
......
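For context on the update_vmid()/need_new_vmid_gen() changes above, the rollover behaviour is unchanged; only the bookkeeping moved into struct kvm_vmid. Assuming 8-bit VMIDs (kvm_get_vmid_bits() can also report 16 on newer CPUs), the sequence is roughly:

	/*
	 * VMIDs 1..255 are handed out in order (0 is reserved for the host).
	 * When kvm_next_vmid wraps back to 0, kvm_vmid_gen is bumped, all CPUs
	 * are forced out of guest mode and __kvm_flush_vm_context() flushes
	 * stale TLB entries; every VM's vmid.vmid_gen is now old, so its next
	 * update_vmid() call allocates a fresh VMID under kvm_vmid_lock.
	 */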
...@@ -226,7 +226,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) ...@@ -226,7 +226,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
int i; int i;
u32 elrsr; u32 elrsr;
elrsr = read_gicreg(ICH_ELSR_EL2); elrsr = read_gicreg(ICH_ELRSR_EL2);
write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2); write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
......
...@@ -908,6 +908,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, ...@@ -908,6 +908,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
*/ */
int kvm_alloc_stage2_pgd(struct kvm *kvm) int kvm_alloc_stage2_pgd(struct kvm *kvm)
{ {
phys_addr_t pgd_phys;
pgd_t *pgd; pgd_t *pgd;
if (kvm->arch.pgd != NULL) { if (kvm->arch.pgd != NULL) {
...@@ -920,7 +921,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) ...@@ -920,7 +921,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
if (!pgd) if (!pgd)
return -ENOMEM; return -ENOMEM;
pgd_phys = virt_to_phys(pgd);
if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
return -EINVAL;
kvm->arch.pgd = pgd; kvm->arch.pgd = pgd;
kvm->arch.pgd_phys = pgd_phys;
return 0; return 0;
} }
...@@ -1008,6 +1014,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm) ...@@ -1008,6 +1014,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
unmap_stage2_range(kvm, 0, kvm_phys_size(kvm)); unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd); pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL; kvm->arch.pgd = NULL;
kvm->arch.pgd_phys = 0;
} }
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
...@@ -1396,14 +1403,6 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) ...@@ -1396,14 +1403,6 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
return false; return false;
} }
static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
return kvm_vcpu_dabt_iswrite(vcpu);
}
/** /**
* stage2_wp_ptes - write protect PMD range * stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry * @pmd: pointer to pmd entry
...@@ -1598,14 +1597,13 @@ static void kvm_send_hwpoison_signal(unsigned long address, ...@@ -1598,14 +1597,13 @@ static void kvm_send_hwpoison_signal(unsigned long address,
static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot, static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
unsigned long hva) unsigned long hva)
{ {
gpa_t gpa_start, gpa_end; gpa_t gpa_start;
hva_t uaddr_start, uaddr_end; hva_t uaddr_start, uaddr_end;
size_t size; size_t size;
size = memslot->npages * PAGE_SIZE; size = memslot->npages * PAGE_SIZE;
gpa_start = memslot->base_gfn << PAGE_SHIFT; gpa_start = memslot->base_gfn << PAGE_SHIFT;
gpa_end = gpa_start + size;
uaddr_start = memslot->userspace_addr; uaddr_start = memslot->userspace_addr;
uaddr_end = uaddr_start + size; uaddr_end = uaddr_start + size;
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H #define _TRACE_KVM_H
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#undef TRACE_SYSTEM #undef TRACE_SYSTEM
...@@ -262,10 +263,114 @@ TRACE_EVENT(kvm_timer_update_irq, ...@@ -262,10 +263,114 @@ TRACE_EVENT(kvm_timer_update_irq,
__entry->vcpu_id, __entry->irq, __entry->level) __entry->vcpu_id, __entry->irq, __entry->level)
); );
TRACE_EVENT(kvm_get_timer_map,
TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
TP_ARGS(vcpu_id, map),
TP_STRUCT__entry(
__field( unsigned long, vcpu_id )
__field( int, direct_vtimer )
__field( int, direct_ptimer )
__field( int, emul_ptimer )
),
TP_fast_assign(
__entry->vcpu_id = vcpu_id;
__entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
__entry->direct_ptimer =
(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
__entry->emul_ptimer =
(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
),
TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d",
__entry->vcpu_id,
__entry->direct_vtimer,
__entry->direct_ptimer,
__entry->emul_ptimer)
);
TRACE_EVENT(kvm_timer_save_state,
TP_PROTO(struct arch_timer_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field( unsigned long, ctl )
__field( unsigned long long, cval )
__field( int, timer_idx )
),
TP_fast_assign(
__entry->ctl = ctx->cnt_ctl;
__entry->cval = ctx->cnt_cval;
__entry->timer_idx = arch_timer_ctx_index(ctx);
),
TP_printk(" CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
__entry->ctl,
__entry->cval,
__entry->timer_idx)
);
TRACE_EVENT(kvm_timer_restore_state,
TP_PROTO(struct arch_timer_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field( unsigned long, ctl )
__field( unsigned long long, cval )
__field( int, timer_idx )
),
TP_fast_assign(
__entry->ctl = ctx->cnt_ctl;
__entry->cval = ctx->cnt_cval;
__entry->timer_idx = arch_timer_ctx_index(ctx);
),
TP_printk("CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
__entry->ctl,
__entry->cval,
__entry->timer_idx)
);
TRACE_EVENT(kvm_timer_hrtimer_expire,
TP_PROTO(struct arch_timer_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field( int, timer_idx )
),
TP_fast_assign(
__entry->timer_idx = arch_timer_ctx_index(ctx);
),
TP_printk("arch_timer_ctx_index: %d", __entry->timer_idx)
);
TRACE_EVENT(kvm_timer_emulate,
TP_PROTO(struct arch_timer_context *ctx, bool should_fire),
TP_ARGS(ctx, should_fire),
TP_STRUCT__entry(
__field( int, timer_idx )
__field( bool, should_fire )
),
TP_fast_assign(
__entry->timer_idx = arch_timer_ctx_index(ctx);
__entry->should_fire = should_fire;
),
TP_printk("arch_timer_ctx_index: %d (should_fire: %d)",
__entry->timer_idx, __entry->should_fire)
);
#endif /* _TRACE_KVM_H */ #endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm #define TRACE_INCLUDE_PATH ../../virt/kvm/arm
#undef TRACE_INCLUDE_FILE #undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace #define TRACE_INCLUDE_FILE trace
......
...@@ -589,7 +589,7 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable); ...@@ -589,7 +589,7 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
*/ */
int vgic_v3_probe(const struct gic_kvm_info *info) int vgic_v3_probe(const struct gic_kvm_info *info)
{ {
u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2); u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
int ret; int ret;
/* /*
...@@ -679,7 +679,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu) ...@@ -679,7 +679,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
if (likely(cpu_if->vgic_sre)) if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr); cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
kvm_call_hyp(__vgic_v3_save_aprs, vcpu); kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
......