Commit b619d9aa authored by Andrew Scull's avatar Andrew Scull Committed by Marc Zyngier

KVM: arm64: Introduce hyp context

During __guest_enter, save and restore from a new hyp context rather
than the host context. This is preparation for separation of the hyp and
host context in nVHE.
Signed-off-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200915104643.2543892-9-ascull@google.com
parent 472fc011
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector); DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
#define read_sysreg_elx(r,nvh,vh) \ #define read_sysreg_elx(r,nvh,vh) \
...@@ -89,7 +90,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu); ...@@ -89,7 +90,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void); void deactivate_traps_vhe_put(void);
#endif #endif
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); u64 __guest_enter(struct kvm_vcpu *vcpu);
void __noreturn hyp_panic(void); void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__ #ifdef __KVM_NVHE_HYPERVISOR__
......
...@@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask); ...@@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */ /* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_ssbd_callback_required); KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data); KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_hyp_ctxt);
KVM_NVHE_ALIAS(kvm_hyp_vector); KVM_NVHE_ALIAS(kvm_hyp_vector);
KVM_NVHE_ALIAS(kvm_vgic_global_state); KVM_NVHE_ALIAS(kvm_vgic_global_state);
......
...@@ -47,6 +47,7 @@ __asm__(".arch_extension virt"); ...@@ -47,6 +47,7 @@ __asm__(".arch_extension virt");
#endif #endif
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
...@@ -1542,6 +1543,7 @@ static int init_hyp_mode(void) ...@@ -1542,6 +1543,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct kvm_host_data *cpu_data; struct kvm_host_data *cpu_data;
struct kvm_cpu_context *hyp_ctxt;
unsigned long *vector; unsigned long *vector;
cpu_data = per_cpu_ptr(&kvm_host_data, cpu); cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
...@@ -1552,6 +1554,14 @@ static int init_hyp_mode(void) ...@@ -1552,6 +1554,14 @@ static int init_hyp_mode(void)
goto out_err; goto out_err;
} }
hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu);
err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP);
if (err) {
kvm_err("Cannot map hyp context: %d\n", err);
goto out_err;
}
vector = per_cpu_ptr(&kvm_hyp_vector, cpu); vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
err = create_hyp_mappings(vector, vector + 1, PAGE_HYP); err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
......
...@@ -57,15 +57,15 @@ ...@@ -57,15 +57,15 @@
.endm .endm
/* /*
* u64 __guest_enter(struct kvm_vcpu *vcpu, * u64 __guest_enter(struct kvm_vcpu *vcpu);
* struct kvm_cpu_context *host_ctxt);
*/ */
SYM_FUNC_START(__guest_enter) SYM_FUNC_START(__guest_enter)
// x0: vcpu // x0: vcpu
// x1: host context // x1-x17: clobbered by macros
// x2-x17: clobbered by macros
// x29: guest context // x29: guest context
hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
// Store the host regs // Store the host regs
save_callee_saved_regs x1 save_callee_saved_regs x1
...@@ -148,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) ...@@ -148,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// Store the guest's sp_el0 // Store the guest's sp_el0
save_sp_el0 x1, x2 save_sp_el0 x1, x2
get_host_ctxt x2, x3 hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
// Macro ptrauth_switch_to_guest format: // Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
......
...@@ -381,7 +381,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) ...@@ -381,7 +381,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
!esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu))) !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
return false; return false;
ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
__ptrauth_save_key(ctxt, APIA); __ptrauth_save_key(ctxt, APIA);
__ptrauth_save_key(ctxt, APIB); __ptrauth_save_key(ctxt, APIB);
__ptrauth_save_key(ctxt, APDA); __ptrauth_save_key(ctxt, APDA);
......
...@@ -209,7 +209,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -209,7 +209,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
do { do {
/* Jump in the fire! */ /* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt); exit_code = __guest_enter(vcpu);
/* And we're baaack! */ /* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code)); } while (fixup_guest_exit(vcpu, &exit_code));
......
...@@ -135,7 +135,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) ...@@ -135,7 +135,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
do { do {
/* Jump in the fire! */ /* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt); exit_code = __guest_enter(vcpu);
/* And we're baaack! */ /* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code)); } while (fixup_guest_exit(vcpu, &exit_code));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment