Commit 71071acf authored by Marc Zyngier

KVM: arm64: hyp: Use ctxt_sys_reg/__vcpu_sys_reg instead of raw sys_regs access

Switch the hypervisor code to using ctxt_sys_reg/__vcpu_sys_reg instead
of raw sys_regs accesses. No intended functional change.
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 1b422dd7
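
[Note: the definitions below are not part of this diff; they are a sketch of what the ctxt_sys_reg()/__vcpu_sys_reg() accessors are assumed to expand to at this point in the series. Both produce an lvalue, so they work on either side of an assignment, exactly as the hunks below use them:]

	/* Assumed accessor definitions, shown for illustration only: */
	#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])		/* pointer to the register slot */
	#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))		/* lvalue: usable for read and write */
	#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

	/* Example usage, mirroring the hunks below: */
	ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);	/* save */
	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);	/* restore */
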
@@ -561,7 +561,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
-	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
 
 static inline bool kvm_arch_requires_vhe(void)
@@ -104,7 +104,7 @@ static inline void __debug_save_state(struct kvm_vcpu *vcpu,
 	save_debug(dbg->dbg_wcr, dbgwcr, wrps);
 	save_debug(dbg->dbg_wvr, dbgwvr, wrps);
 
-	ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+	ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1);
 }
 
 static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
@@ -124,7 +124,7 @@ static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
 	restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
 	restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
 
-	write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1);
 }
 
 static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
@@ -53,7 +53,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
-	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
 }
 
 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -268,15 +268,14 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		sve_load_state(vcpu_sve_pffr(vcpu),
 			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
 			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
-		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
 	} else {
 		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
 	}
 
 	/* Skip restoring fpexc32 for AArch64 guests */
 	if (!(read_sysreg(hcr_el2) & HCR_RW))
-		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
-			     fpexc32_el2);
+		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
 
 	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
@@ -17,34 +17,34 @@
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
+	ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
 }
 
 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
-	ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
+	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
 }
 
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
-	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
-	ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
-	ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
-	ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
-	ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
-	ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
-	ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
-	ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
-	ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
-	ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
-	ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
-	ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
-	ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
-	ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
-	ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
-	ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+	ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1);
+	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
+	ctxt_sys_reg(ctxt, CPACR_EL1) = read_sysreg_el1(SYS_CPACR);
+	ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
+	ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
+	ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
+	ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
+	ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
+	ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
+	ctxt_sys_reg(ctxt, FAR_EL1) = read_sysreg_el1(SYS_FAR);
+	ctxt_sys_reg(ctxt, MAIR_EL1) = read_sysreg_el1(SYS_MAIR);
+	ctxt_sys_reg(ctxt, VBAR_EL1) = read_sysreg_el1(SYS_VBAR);
+	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
+	ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
+	ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
+	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg(par_el1);
+	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
 
 	ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
 	ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
@@ -57,55 +57,55 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 	ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
 
 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
+		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
 }
 
 static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
 }
 
 static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
-	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
+	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
 }
 
 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
-	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);
+	write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1), csselr_el1);
 
 	if (has_vhe() ||
 	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
-		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
 	} else if (!ctxt->__hyp_running_vcpu) {
 		/*
 		 * Must only be done for guest registers, hence the context
 		 * test. We're coming from the host, so SCTLR.M is already
 		 * set. Pairs with nVHE's __activate_traps().
 		 */
-		write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
-				  TCR_EPD1_MASK | TCR_EPD0_MASK),
-				 SYS_TCR);
+		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
+				 SYS_TCR);
 		isb();
 	}
 
-	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
-	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
-	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
-	write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
-	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
-	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
-	write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
-	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
-	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
-	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
-	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
-	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
-	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
-	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1), SYS_FAR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1), SYS_MAIR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
+	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);
 
 	if (!has_vhe() &&
 	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
@@ -120,9 +120,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	 * deconfigured and disabled. We can now restore the host's
 	 * S1 configuration: SCTLR, and only then TCR.
 	 */
-	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
 	isb();
-	write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
 	}
 
 	write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
@@ -153,51 +153,49 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 	write_sysreg_el2(pstate, SYS_SPSR);
 
 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
+		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
 }
 
 static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
-	u64 *spsr, *sysreg;
+	u64 *spsr;
 
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
 	spsr = vcpu->arch.ctxt.gp_regs.spsr;
-	sysreg = vcpu->arch.ctxt.sys_regs;
 
 	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
 	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
 	spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
 	spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
 
-	sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
-	sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
+	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
 
 	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
-		sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
 }
 
 static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 {
-	u64 *spsr, *sysreg;
+	u64 *spsr;
 
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
 	spsr = vcpu->arch.ctxt.gp_regs.spsr;
-	sysreg = vcpu->arch.ctxt.sys_regs;
 
 	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
 	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
 	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
 	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
 
-	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
-	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
+	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);
 
 	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
-		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
 }
 
 #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */
@@ -52,9 +52,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		 * configured and enabled. We can now restore the guest's S1
 		 * configuration: SCTLR, and only then TCR.
 		 */
-		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
 		isb();
-		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
 	}
 }