Commit 4db61fef authored by Mark Brown, committed by Catalin Marinas

arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations

In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC with separate annotations for standard C callable functions,
data, and code with different calling conventions.

Using these for __smccc_workaround_1_smc is more involved than for most
symbols as this symbol is annotated quite unusually: rather than just
having the explicit symbol, we define _start and _end symbols which we
then use to compute the length. This does not play at all nicely with
the new style macros. Instead, define a constant for the size of the
function and use that both in the C code and for .org based size checks
in the assembly code.
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
parent 6e52aab9
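
To make the approach described in the commit message concrete, here is a minimal, self-contained C sketch of the pattern the C side moves to: a single exported symbol plus a size constant replaces the old _start/_end symbol pair, and the end of the sequence is recovered by pointer arithmetic. The stand-in array, the slot[] buffer and main() are hypothetical scaffolding for illustration only; in the kernel the symbol is provided by the assembly in hyp-entry.S and the copy happens in __copy_hyp_vect_bpi().

#include <stdio.h>
#include <string.h>

/* Size constant shared between C and assembly in the patch below. */
#define __SMCCC_WORKAROUND_1_SMC_SZ	36

/*
 * Hypothetical stand-in for the assembly-provided sequence; the kernel
 * instead declares:
 *   extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 */
static char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

int main(void)
{
	char slot[__SMCCC_WORKAROUND_1_SMC_SZ];

	/* The end pointer is derived from the one symbol plus the constant... */
	const char *start = __smccc_workaround_1_smc;
	const char *end = __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ;

	/* ...so length computations work exactly as with the old _end symbol. */
	memcpy(slot, start, end - start);
	printf("copied %zu bytes\n", (size_t)(end - start));

	return 0;
}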
arch/arm64/include/asm/kvm_asm.h

@@ -36,6 +36,8 @@
  */
 #define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
 
+#define __SMCCC_WORKAROUND_1_SMC_SZ	36
+
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
@@ -75,6 +77,8 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
+extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
+
 /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
 #define __hyp_this_cpu_ptr(sym)	\
 	({	\
arch/arm64/kernel/cpu_errata.c

@@ -11,6 +11,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 
 static bool __maybe_unused
@@ -113,9 +114,6 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
-
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 	raw_spin_unlock(&bp_lock);
 }
 #else
-#define __smccc_workaround_1_smc_start		NULL
-#define __smccc_workaround_1_smc_end		NULL
-
 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
 		smccc_end = NULL;
 		break;
 
+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
 	case SMCCC_CONDUIT_SMC:
 		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc_start;
-		smccc_end = __smccc_workaround_1_smc_end;
+		smccc_start = __smccc_workaround_1_smc;
+		smccc_end = __smccc_workaround_1_smc +
+			__SMCCC_WORKAROUND_1_SMC_SZ;
 		break;
+#endif
 
 	default:
 		return -1;
arch/arm64/kvm/hyp/hyp-entry.S

@@ -322,7 +322,7 @@ SYM_CODE_END(__bp_harden_hyp_vecs)
 
 	.popsection
 
-ENTRY(__smccc_workaround_1_smc_start)
+SYM_CODE_START(__smccc_workaround_1_smc)
 	esb
 	sub	sp, sp, #(8 * 4)
 	stp	x2, x3, [sp, #(8 * 0)]
@@ -332,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start)
 	ldp	x2, x3, [sp, #(8 * 0)]
 	ldp	x0, x1, [sp, #(8 * 2)]
 	add	sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+1:	.org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+	.org 1b
+SYM_CODE_END(__smccc_workaround_1_smc)
 #endif
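
A note on the pair of .org directives added at the end of the assembly function above: together they act as a build-time size assertion. GNU as will not move the location counter backwards, so the first .org fails if the code is longer than __SMCCC_WORKAROUND_1_SMC_SZ, and the second fails if it is shorter (the first .org would then have padded past the 1: label). Below is a stripped-down sketch of the idiom with a hypothetical symbol and size (two 4-byte arm64 instructions, so 8 bytes), assemblable on its own with an aarch64 assembler:

	.equ	MY_SEQ_SZ, 8		/* hypothetical size: exactly two 4-byte instructions */

	.text
	.globl	my_seq
my_seq:					/* hypothetical stand-in for __smccc_workaround_1_smc */
	nop
	nop
1:	.org	my_seq + MY_SEQ_SZ	/* assembler error if the code above is too long */
	.org	1b			/* assembler error if the code above is too short */

Adding or removing a nop in the sketch makes the assembler reject the file, complaining about an attempt to move .org backwards, which is how any drift between the code and the size constant gets caught at build time.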