Commit c50cb043 authored by David Brazdil, committed by Marc Zyngier

KVM: arm64: Remove __hyp_text macro, use build rules instead

With nVHE code now fully separated from the rest of the kernel, the effects of
the __hyp_text macro (which had to be applied on all nVHE code) can be
achieved with build rules instead. The macro used to:
  (a) move code to the .hyp.text ELF section, now done by renaming .text using
      `objcopy`, and
  (b) negate the effects of CC_FLAGS_FTRACE and CC_FLAGS_SCS via `notrace` and
      `__noscs`, respectively, now done by erasing those flags from
      KBUILD_CFLAGS (the same way as in the EFI stub; see the Makefile fragment
      quoted below).
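
For reference, the build-rule equivalents of (a) and (b) boil down to the
following fragments of the nVHE hyp Makefile (the full hunk appears in the
diff below):

  cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
                --rename-section=.text=.hyp.text \
                $< $@

  KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))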

Note that by removing __hyp_text from code shared with VHE, all VHE code is now
compiled into .text and without `notrace` and `__noscs`.

Uses of '.pushsection .hyp.text' are removed from assembly files, as this is
now also covered by the build rules.

For MAINTAINERS: if this ever needs to be re-run, uses of the macro were
removed with the following command. Formatting was fixed up manually.

  find arch/arm64/kvm/hyp -type f -name '*.c' -o -name '*.h' \
       -exec sed -i 's/ __hyp_text//g' {} +
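
To preview the substitution on a single file without rewriting it in place,
the same expression can be piped through diff first (illustrative example;
arch/arm64/kvm/hyp/nvhe/tlb.c is just one of the affected files):

  sed 's/ __hyp_text//g' arch/arm64/kvm/hyp/nvhe/tlb.c \
      | diff arch/arm64/kvm/hyp/nvhe/tlb.c -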
Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200625131420.71444-15-dbrazdil@google.com
parent c04dd455
@@ -516,7 +516,7 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
......
@@ -12,8 +12,6 @@
 #include <asm/alternative.h>
 #include <asm/sysreg.h>
 
-#define __hyp_text __section(.hyp.text) notrace __noscs
-
 #define read_sysreg_elx(r,nvh,vh) \
 	({ \
 		u64 reg; \
......
@@ -44,7 +44,7 @@ static const unsigned short cc_map[16] = {
 /*
  * Check if a trapped instruction should have been executed or not.
  */
-bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 {
 	unsigned long cpsr;
 	u32 cpsr_cond;
@@ -93,7 +93,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
  *
  * IT[7:0] -> CPSR[26:25],CPSR[15:10]
  */
-static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 {
 	unsigned long itbits, cond;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  * kvm_skip_instr - skip a trapped instruction and proceed to the next
  * @vcpu: The vcpu pointer
  */
-void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	u32 pc = *vcpu_pc(vcpu);
 	bool is_thumb;
......
@@ -21,7 +21,6 @@
 #define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
 
 	.text
-	.pushsection	.hyp.text, "ax"
 
 /*
  * We treat x18 as callee-saved as the host may use it as a platform
......
@@ -9,7 +9,6 @@
 #include <asm/fpsimdmacros.h>
 
 	.text
-	.pushsection	.hyp.text, "ax"
 
 SYM_FUNC_START(__fpsimd_save_state)
 	fpsimd_save x0, 1
......
@@ -16,7 +16,6 @@
 #include <asm/mmu.h>
 
 	.text
-	.pushsection	.hyp.text, "ax"
 
 .macro do_el2_call
 	/*
......
@@ -88,9 +88,9 @@
 	default:	write_debug(ptr[0], reg, 0); \
 	}
 
-static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
-						 struct kvm_guest_debug_arch *dbg,
-						 struct kvm_cpu_context *ctxt)
+static inline void __debug_save_state(struct kvm_vcpu *vcpu,
+				      struct kvm_guest_debug_arch *dbg,
+				      struct kvm_cpu_context *ctxt)
 {
 	u64 aa64dfr0;
 	int brps, wrps;
@@ -107,9 +107,9 @@ static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
 	ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
 }
 
-static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
-						    struct kvm_guest_debug_arch *dbg,
-						    struct kvm_cpu_context *ctxt)
+static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
+					 struct kvm_guest_debug_arch *dbg,
+					 struct kvm_cpu_context *ctxt)
 {
 	u64 aa64dfr0;
 	int brps, wrps;
@@ -127,7 +127,7 @@ static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
 	write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
 }
 
-static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
+static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -146,7 +146,7 @@ static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vc
 	__debug_restore_state(vcpu, guest_dbg, guest_ctxt);
 }
 
-static inline void __hyp_text __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
+static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
......
@@ -30,7 +30,7 @@
 extern const char __hyp_panic_string[];
 
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
-static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
+static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * When the system doesn't support FP/SIMD, we cannot rely on
@@ -48,7 +48,7 @@ static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 }
 
 /* Save the 32-bit only FPSIMD system register state */
-static inline void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
+static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
 {
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
@@ -56,7 +56,7 @@ static inline void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
 	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
 }
 
-static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
+static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We are about to set CPTR_EL2.TFP to trap all floating point
@@ -73,7 +73,7 @@ static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
 	}
 }
 
-static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
+static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 {
 	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
@@ -89,13 +89,13 @@ static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
-static inline void __hyp_text __deactivate_traps_common(void)
+static inline void __deactivate_traps_common(void)
 {
 	write_sysreg(0, hstr_el2);
 	write_sysreg(0, pmuserenr_el0);
 }
 
-static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
+static inline void ___activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 hcr = vcpu->arch.hcr_el2;
@@ -108,7 +108,7 @@ static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
 		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 }
 
-static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
+static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * If we pended a virtual abort, preserve it until it gets
@@ -122,12 +122,12 @@ static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
 	}
 }
 
-static inline void __hyp_text __activate_vm(struct kvm *kvm)
+static inline void __activate_vm(struct kvm *kvm)
 {
 	__load_guest_stage2(kvm);
 }
 
-static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
+static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
 	u64 par, tmp;
@@ -156,7 +156,7 @@ static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	return true;
 }
 
-static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 {
 	u8 ec;
 	u64 esr;
@@ -196,7 +196,7 @@ static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 }
 
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
-static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
 	u8 hsr_ec;
@@ -283,7 +283,7 @@ static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
 	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
@@ -338,7 +338,7 @@ static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+static inline bool esr_is_ptrauth_trap(u32 esr)
 {
 	u32 ec = ESR_ELx_EC(esr);
@@ -371,7 +371,7 @@ static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
 	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
 })
 
-static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *ctxt;
 	u64 val;
@@ -401,7 +401,7 @@ static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
  * the guest, false when we should restore the host state and return to the
  * main run loop.
  */
-static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -473,7 +473,7 @@ static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_
 	return false;
 }
 
-static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
+static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
 {
 	if (!cpus_have_final_cap(ARM64_SSBD))
 		return false;
@@ -481,7 +481,7 @@ static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
 	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
 }
 
-static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
+static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_ARM64_SSBD
 	/*
@@ -494,7 +494,7 @@ static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu
 #endif
 }
 
-static inline void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
+static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_ARM64_SSBD
 	/*
......
@@ -15,18 +15,18 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
-static inline void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
 }
 
-static inline void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
 	ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
 }
 
-static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
 	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
@@ -51,7 +51,7 @@ static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ct
 	ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(SYS_SPSR);
 }
 
-static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
 	ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
@@ -60,18 +60,18 @@ static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_cont
 		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
 }
 
-static inline void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
 }
 
-static inline void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
 	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
 }
 
-static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
@@ -130,7 +130,7 @@ static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context
 	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], SYS_SPSR);
 }
 
-static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	u64 pstate = ctxt->gp_regs.regs.pstate;
 	u64 mode = pstate & PSR_AA32_MODE_MASK;
@@ -156,7 +156,7 @@ static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_c
 		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
 }
 
-static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
+static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
 	u64 *spsr, *sysreg;
@@ -178,7 +178,7 @@ static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
 	sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
 }
 
-static inline void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 {
 	u64 *spsr, *sysreg;
......
@@ -21,7 +21,13 @@ $(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
 	$(call if_changed,hypcopy)
 
 quiet_cmd_hypcopy = HYPCOPY $@
-      cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
+      cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
+		     --rename-section=.text=.hyp.text \
+		     $< $@
+
+# Remove ftrace and Shadow Call Stack CFLAGS.
+# This is equivalent to the 'notrace' and '__noscs' annotations.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
 
 # KVM nVHE code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may
......
@@ -14,7 +14,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static void __hyp_text __debug_save_spe(u64 *pmscr_el1)
+static void __debug_save_spe(u64 *pmscr_el1)
 {
 	u64 reg;
@@ -46,7 +46,7 @@ static void __hyp_text __debug_save_spe(u64 *pmscr_el1)
 	dsb(nsh);
 }
 
-static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
+static void __debug_restore_spe(u64 pmscr_el1)
 {
 	if (!pmscr_el1)
 		return;
@@ -58,20 +58,20 @@ static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
 	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
 }
 
-void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	/* Disable and flush SPE data generation */
 	__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
 	__debug_switch_to_guest_common(vcpu);
 }
 
-void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
+void __debug_switch_to_host(struct kvm_vcpu *vcpu)
 {
 	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
 	__debug_switch_to_host_common(vcpu);
 }
 
-u32 __hyp_text __kvm_get_mdcr_el2(void)
+u32 __kvm_get_mdcr_el2(void)
 {
 	return read_sysreg(mdcr_el2);
 }
@@ -27,7 +27,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 
-static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
@@ -58,7 +58,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 mdcr_el2;
@@ -93,13 +93,13 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
-static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+static void __deactivate_vm(struct kvm_vcpu *vcpu)
 {
 	write_sysreg(0, vttbr_el2);
 }
 
 /* Save VGICv3 state on non-VHE systems */
-static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
+static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -108,7 +108,7 @@ static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 }
 
 /* Restore VGICv3 state on non_VEH systems */
-static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
+static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -119,7 +119,7 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 /**
  * Disable host events, enable guest events
  */
-static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 {
 	struct kvm_host_data *host;
 	struct kvm_pmu_events *pmu;
@@ -139,7 +139,7 @@ static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 /**
  * Disable guest events, enable host events
  */
-static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 {
 	struct kvm_host_data *host;
 	struct kvm_pmu_events *pmu;
@@ -155,7 +155,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 }
 
 /* Switch to the guest for legacy non-VHE systems */
-int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -242,7 +242,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	return exit_code;
 }
 
-void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
......
@@ -18,7 +18,7 @@
  * Non-VHE: Both host and guest must save everything.
  */
-void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
+void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
 {
 	__sysreg_save_el1_state(ctxt);
 	__sysreg_save_common_state(ctxt);
@@ -26,7 +26,7 @@ void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
 	__sysreg_save_el2_return_state(ctxt);
 }
 
-void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
+void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
 {
 	__sysreg_restore_el1_state(ctxt);
 	__sysreg_restore_common_state(ctxt);
@@ -34,7 +34,7 @@ void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
 	__sysreg_restore_el2_return_state(ctxt);
 }
 
-void __hyp_text __kvm_enable_ssbs(void)
+void __kvm_enable_ssbs(void)
 {
 	u64 tmp;
......
@@ -19,7 +19,7 @@ void __kvm_timer_set_cntvoff(u64 cntvoff)
  * Should only be called on non-VHE systems.
  * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
  */
-void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
+void __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
@@ -33,7 +33,7 @@ void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
  * Should only be called on non-VHE systems.
  * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
 */
-void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
+void __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
......
@@ -12,8 +12,7 @@ struct tlb_inv_context {
 	u64 tcr;
 };
 
-static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
-					     struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
 {
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
@@ -36,8 +35,7 @@ static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
-					    struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
 {
 	write_sysreg(0, vttbr_el2);
@@ -49,7 +47,7 @@ static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
 	}
 }
 
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	struct tlb_inv_context cxt;
@@ -103,7 +101,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__tlb_switch_to_host(kvm, &cxt);
 }
 
-void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
 	struct tlb_inv_context cxt;
@@ -120,7 +118,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	__tlb_switch_to_host(kvm, &cxt);
 }
 
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 	struct tlb_inv_context cxt;
@@ -135,7 +133,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	__tlb_switch_to_host(kvm, &cxt);
 }
 
-void __hyp_text __kvm_flush_vm_context(void)
+void __kvm_flush_vm_context(void)
 {
 	dsb(ishst);
 	__tlbi(alle1is);
......
@@ -13,7 +13,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
+static bool __is_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);
@@ -32,7 +32,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
  *  0: Not a GICV access
  * -1: Illegal GICV access successfully performed
  */
-int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
+int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 	struct vgic_dist *vgic = &kvm->arch.vgic;
......