Commit 0343a7e4 authored by Mark Brown, committed by Will Deacon

arm64: kernel: Convert to modern annotations for assembly functions

In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC and also add a new annotation for static functions, which previously
had no ENTRY equivalent. Update the annotations in the core kernel code to
the new macros.
Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20200501115430.37315-3-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 06607c7e
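For reference, a minimal sketch of the conversion pattern this commit applies (the symbol names and instruction bodies below are made-up illustrations, not code from this patch): C-callable functions move from ENTRY/ENDPROC to SYM_FUNC_START/SYM_FUNC_END, file-local functions get SYM_FUNC_START_LOCAL (the annotation that previously had no ENTRY equivalent), and non-C-callable code such as vector tables and trampolines uses SYM_CODE_START/SYM_CODE_END.

/* Old style: globally visible symbol annotated with ENTRY/ENDPROC. */
ENTRY(example_func)
	mov	x0, #0
	ret
ENDPROC(example_func)

/* New style: a C-callable function. */
SYM_FUNC_START(example_func)
	mov	x0, #0
	ret
SYM_FUNC_END(example_func)

/* New style: a file-local (static) helper, previously just a bare label. */
SYM_FUNC_START_LOCAL(example_local_helper)
	ret
SYM_FUNC_END(example_local_helper)

/* New style: non-C-callable code, e.g. exception vectors or trampolines. */
SYM_CODE_START(example_vectors)
	b	.
SYM_CODE_END(example_vectors)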
@@ -29,7 +29,7 @@
  * branch to what would be the reset vector. It must be executed with the
  * flat identity mapping.
  */
-ENTRY(__cpu_soft_restart)
+SYM_CODE_START(__cpu_soft_restart)
 	/* Clear sctlr_el1 flags. */
 	mrs	x12, sctlr_el1
 	mov_q	x13, SCTLR_ELx_FLAGS
@@ -47,6 +47,6 @@ ENTRY(__cpu_soft_restart)
 	mov	x1, x3				// arg1
 	mov	x2, x4				// arg2
 	br	x8
-ENDPROC(__cpu_soft_restart)
+SYM_CODE_END(__cpu_soft_restart)
 
 .popsection
@@ -5,7 +5,7 @@
 #include <linux/linkage.h>
 
-ENTRY(__efi_rt_asm_wrapper)
+SYM_FUNC_START(__efi_rt_asm_wrapper)
 	stp	x29, x30, [sp, #-32]!
 	mov	x29, sp
@@ -35,4 +35,4 @@ ENTRY(__efi_rt_asm_wrapper)
 	b.ne	0f
 	ret
 0:	b	efi_handle_corrupted_x18	// tail call
-ENDPROC(__efi_rt_asm_wrapper)
+SYM_FUNC_END(__efi_rt_asm_wrapper)
@@ -16,34 +16,34 @@
  *
  * x0 - pointer to struct fpsimd_state
  */
-ENTRY(fpsimd_save_state)
+SYM_FUNC_START(fpsimd_save_state)
 	fpsimd_save x0, 8
 	ret
-ENDPROC(fpsimd_save_state)
+SYM_FUNC_END(fpsimd_save_state)
 
 /*
  * Load the FP registers.
  *
  * x0 - pointer to struct fpsimd_state
  */
-ENTRY(fpsimd_load_state)
+SYM_FUNC_START(fpsimd_load_state)
 	fpsimd_restore x0, 8
 	ret
-ENDPROC(fpsimd_load_state)
+SYM_FUNC_END(fpsimd_load_state)
 
 #ifdef CONFIG_ARM64_SVE
-ENTRY(sve_save_state)
+SYM_FUNC_START(sve_save_state)
 	sve_save 0, x1, 2
 	ret
-ENDPROC(sve_save_state)
+SYM_FUNC_END(sve_save_state)
 
-ENTRY(sve_load_state)
+SYM_FUNC_START(sve_load_state)
 	sve_load 0, x1, x2, 3, x4
 	ret
-ENDPROC(sve_load_state)
+SYM_FUNC_END(sve_load_state)
 
-ENTRY(sve_get_vl)
+SYM_FUNC_START(sve_get_vl)
 	_sve_rdvl 0, 1
 	ret
-ENDPROC(sve_get_vl)
+SYM_FUNC_END(sve_get_vl)
 #endif /* CONFIG_ARM64_SVE */
@@ -65,7 +65,7 @@
  * x5: physical address of a zero page that remains zero after resume
  */
 .pushsection ".hibernate_exit.text", "ax"
-ENTRY(swsusp_arch_suspend_exit)
+SYM_CODE_START(swsusp_arch_suspend_exit)
 	/*
 	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
 	 * with a break-before-make via the zero page
@@ -110,7 +110,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
 	hvc	#0
 3:	ret
-ENDPROC(swsusp_arch_suspend_exit)
+SYM_CODE_END(swsusp_arch_suspend_exit)
 
 /*
  * Restore the hyp stub.
@@ -119,15 +119,15 @@ ENDPROC(swsusp_arch_suspend_exit)
  *
  * x24: The physical address of __hyp_stub_vectors
  */
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
 	msr	vbar_el2, x24
 	eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 .macro invalid_vector	label
-\label:
+SYM_CODE_START_LOCAL(\label)
 	b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 
 	invalid_vector	el2_sync_invalid
@@ -141,7 +141,7 @@ ENDPROC(\label)
 /* el2 vectors - switch el2 here while we restore the memory image. */
 	.align 11
-ENTRY(hibernate_el2_vectors)
+SYM_CODE_START(hibernate_el2_vectors)
 	ventry	el2_sync_invalid		// Synchronous EL2t
 	ventry	el2_irq_invalid			// IRQ EL2t
 	ventry	el2_fiq_invalid			// FIQ EL2t
@@ -161,6 +161,6 @@ ENTRY(hibernate_el2_vectors)
 	ventry	el1_irq_invalid			// IRQ 32-bit EL1
 	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
 	ventry	el1_error_invalid		// Error 32-bit EL1
-END(hibernate_el2_vectors)
+SYM_CODE_END(hibernate_el2_vectors)
 
 .popsection
@@ -21,7 +21,7 @@
 	.align 11
 
-ENTRY(__hyp_stub_vectors)
+SYM_CODE_START(__hyp_stub_vectors)
 	ventry	el2_sync_invalid		// Synchronous EL2t
 	ventry	el2_irq_invalid			// IRQ EL2t
 	ventry	el2_fiq_invalid			// FIQ EL2t
@@ -41,11 +41,11 @@ ENTRY(__hyp_stub_vectors)
 	ventry	el1_irq_invalid			// IRQ 32-bit EL1
 	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
 	ventry	el1_error_invalid		// Error 32-bit EL1
-ENDPROC(__hyp_stub_vectors)
+SYM_CODE_END(__hyp_stub_vectors)
 
 	.align 11
 
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
 	cmp	x0, #HVC_SET_VECTORS
 	b.ne	2f
 	msr	vbar_el2, x1
@@ -68,12 +68,12 @@ el1_sync:
 9:	mov	x0, xzr
 	eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 .macro invalid_vector	label
-\label:
+SYM_CODE_START_LOCAL(\label)
 	b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 
 	invalid_vector	el2_sync_invalid
@@ -106,15 +106,15 @@ ENDPROC(\label)
  * initialisation entry point.
  */
-ENTRY(__hyp_set_vectors)
+SYM_FUNC_START(__hyp_set_vectors)
 	mov	x1, x0
 	mov	x0, #HVC_SET_VECTORS
 	hvc	#0
 	ret
-ENDPROC(__hyp_set_vectors)
+SYM_FUNC_END(__hyp_set_vectors)
 
-ENTRY(__hyp_reset_vectors)
+SYM_FUNC_START(__hyp_reset_vectors)
 	mov	x0, #HVC_RESET_VECTORS
 	hvc	#0
 	ret
-ENDPROC(__hyp_reset_vectors)
+SYM_FUNC_END(__hyp_reset_vectors)
@@ -61,7 +61,7 @@
 	ldp x28, x29, [sp, #S_X28]
 	.endm
 
-ENTRY(kretprobe_trampoline)
+SYM_CODE_START(kretprobe_trampoline)
 	sub sp, sp, #S_FRAME_SIZE
 	save_all_base_regs
@@ -79,4 +79,4 @@ ENTRY(kretprobe_trampoline)
 	add sp, sp, #S_FRAME_SIZE
 	ret
-ENDPROC(kretprobe_trampoline)
+SYM_CODE_END(kretprobe_trampoline)
@@ -5,81 +5,81 @@
 #include <linux/linkage.h>
 
-ENTRY(absolute_data64)
+SYM_FUNC_START(absolute_data64)
 	ldr	x0, 0f
 	ret
 0:	.quad	sym64_abs
-ENDPROC(absolute_data64)
+SYM_FUNC_END(absolute_data64)
 
-ENTRY(absolute_data32)
+SYM_FUNC_START(absolute_data32)
 	ldr	w0, 0f
 	ret
 0:	.long	sym32_abs
-ENDPROC(absolute_data32)
+SYM_FUNC_END(absolute_data32)
 
-ENTRY(absolute_data16)
+SYM_FUNC_START(absolute_data16)
 	adr	x0, 0f
 	ldrh	w0, [x0]
 	ret
 0:	.short	sym16_abs, 0
-ENDPROC(absolute_data16)
+SYM_FUNC_END(absolute_data16)
 
-ENTRY(signed_movw)
+SYM_FUNC_START(signed_movw)
 	movz	x0, #:abs_g2_s:sym64_abs
 	movk	x0, #:abs_g1_nc:sym64_abs
 	movk	x0, #:abs_g0_nc:sym64_abs
 	ret
-ENDPROC(signed_movw)
+SYM_FUNC_END(signed_movw)
 
-ENTRY(unsigned_movw)
+SYM_FUNC_START(unsigned_movw)
 	movz	x0, #:abs_g3:sym64_abs
 	movk	x0, #:abs_g2_nc:sym64_abs
 	movk	x0, #:abs_g1_nc:sym64_abs
 	movk	x0, #:abs_g0_nc:sym64_abs
 	ret
-ENDPROC(unsigned_movw)
+SYM_FUNC_END(unsigned_movw)
 
 	.align	12
 	.space	0xff8
-ENTRY(relative_adrp)
+SYM_FUNC_START(relative_adrp)
 	adrp	x0, sym64_rel
 	add	x0, x0, #:lo12:sym64_rel
 	ret
-ENDPROC(relative_adrp)
+SYM_FUNC_END(relative_adrp)
 
 	.align	12
 	.space	0xffc
-ENTRY(relative_adrp_far)
+SYM_FUNC_START(relative_adrp_far)
 	adrp	x0, memstart_addr
 	add	x0, x0, #:lo12:memstart_addr
 	ret
-ENDPROC(relative_adrp_far)
+SYM_FUNC_END(relative_adrp_far)
 
-ENTRY(relative_adr)
+SYM_FUNC_START(relative_adr)
 	adr	x0, sym64_rel
 	ret
-ENDPROC(relative_adr)
+SYM_FUNC_END(relative_adr)
 
-ENTRY(relative_data64)
+SYM_FUNC_START(relative_data64)
 	adr	x1, 0f
 	ldr	x0, [x1]
 	add	x0, x0, x1
 	ret
 0:	.quad	sym64_rel - .
-ENDPROC(relative_data64)
+SYM_FUNC_END(relative_data64)
 
-ENTRY(relative_data32)
+SYM_FUNC_START(relative_data32)
 	adr	x1, 0f
 	ldr	w0, [x1]
 	add	x0, x0, x1
 	ret
 0:	.long	sym64_rel - .
-ENDPROC(relative_data32)
+SYM_FUNC_END(relative_data32)
 
-ENTRY(relative_data16)
+SYM_FUNC_START(relative_data16)
 	adr	x1, 0f
 	ldrsh	w0, [x1]
 	add	x0, x0, x1
 	ret
 0:	.short	sym64_rel - ., 0
-ENDPROC(relative_data16)
+SYM_FUNC_END(relative_data16)
@@ -26,7 +26,7 @@
  * control_code_page, a special page which has been set up to be preserved
  * during the copy operation.
  */
-ENTRY(arm64_relocate_new_kernel)
+SYM_CODE_START(arm64_relocate_new_kernel)
 	/* Setup the list loop variables. */
 	mov	x18, x2				/* x18 = dtb address */
@@ -111,7 +111,7 @@ ENTRY(arm64_relocate_new_kernel)
 	mov	x3, xzr
 	br	x17
-ENDPROC(arm64_relocate_new_kernel)
+SYM_CODE_END(arm64_relocate_new_kernel)
 
 .align 3	/* To keep the 64-bit values below naturally aligned. */
...
@@ -62,7 +62,7 @@
  *
  * x0 = struct sleep_stack_data area
  */
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
 	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
 	stp	x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
 	stp	x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
@@ -95,10 +95,10 @@ ENTRY(__cpu_suspend_enter)
 	ldp	x29, lr, [sp], #16
 	mov	x0, #1
 	ret
-ENDPROC(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
 
 	.pushsection ".idmap.text", "awx"
-ENTRY(cpu_resume)
+SYM_CODE_START(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	mov	x0, #ARM64_CPU_RUNTIME
 	bl	__cpu_setup
@@ -107,11 +107,11 @@ ENTRY(cpu_resume)
 	bl	__enable_mmu
 	ldr	x8, =_cpu_resume
 	br	x8
-ENDPROC(cpu_resume)
+SYM_CODE_END(cpu_resume)
 	.ltorg
 	.popsection
 
-ENTRY(_cpu_resume)
+SYM_FUNC_START(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adr_l	x8, mpidr_hash		// x8 = struct mpidr_hash virt address
@@ -147,4 +147,4 @@ ENTRY(_cpu_resume)
 	ldp	x29, lr, [x29]
 	mov	x0, #0
 	ret
-ENDPROC(_cpu_resume)
+SYM_FUNC_END(_cpu_resume)
@@ -30,9 +30,9 @@
  * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
  * struct arm_smccc_quirk *quirk)
  */
-ENTRY(__arm_smccc_smc)
+SYM_FUNC_START(__arm_smccc_smc)
 	SMCCC	smc
-ENDPROC(__arm_smccc_smc)
+SYM_FUNC_END(__arm_smccc_smc)
 EXPORT_SYMBOL(__arm_smccc_smc)
 
 /*
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__arm_smccc_smc)
  * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
  * struct arm_smccc_quirk *quirk)
  */
-ENTRY(__arm_smccc_hvc)
+SYM_FUNC_START(__arm_smccc_hvc)
 	SMCCC	hvc
-ENDPROC(__arm_smccc_hvc)
+SYM_FUNC_END(__arm_smccc_hvc)
 EXPORT_SYMBOL(__arm_smccc_hvc)