Commit 0ccbd98a authored by Mark Brown, committed by Catalin Marinas

arm64: entry: Annotate vector table and handlers as code

In an effort to clarify and simplify the annotation of assembly
functions, new macros have been introduced. These replace ENTRY and
ENDPROC with separate annotations for normal functions and for those
with unusual calling conventions. The vector table and handlers aren't
normal C-style code, so they should be annotated as CODE.
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent b8e50548
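
As a rough illustration of the annotation change described above (a minimal sketch; my_handler is a hypothetical label, not one from this patch), the SYM_CODE_* macros from include/linux/linkage.h replace the generic ENTRY/ENDPROC pair for code that does not follow the C calling convention:

	/* Old style: generic function annotations, even for non-C code */
	ENTRY(my_handler)		// global, aligned label
		ret
	ENDPROC(my_handler)		// marks the symbol as a function and sets its size

	/* New style: SYM_CODE_* explicitly marks non-C-calling-convention code */
	SYM_CODE_START(my_handler)
		ret
	SYM_CODE_END(my_handler)

In the diff below the handlers are file-local symbols, so the _LOCAL variants are used, and where an explicit .align 6 directive already precedes the label the _NOALIGN forms are chosen so the existing alignment is kept.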
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -465,7 +465,7 @@ alternative_endif
 	.pushsection ".entry.text", "ax"
 
 	.align	11
-ENTRY(vectors)
+SYM_CODE_START(vectors)
 	kernel_ventry	1, sync_invalid			// Synchronous EL1t
 	kernel_ventry	1, irq_invalid			// IRQ EL1t
 	kernel_ventry	1, fiq_invalid			// FIQ EL1t
@@ -492,7 +492,7 @@ ENTRY(vectors)
 	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
 	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 #endif
-END(vectors)
+SYM_CODE_END(vectors)
 
 #ifdef CONFIG_VMAP_STACK
 	/*
@@ -534,57 +534,57 @@ __bad_stack:
 	ASM_BUG()
 	.endm
 
-el0_sync_invalid:
+SYM_CODE_START_LOCAL(el0_sync_invalid)
 	inv_entry 0, BAD_SYNC
-ENDPROC(el0_sync_invalid)
+SYM_CODE_END(el0_sync_invalid)
 
-el0_irq_invalid:
+SYM_CODE_START_LOCAL(el0_irq_invalid)
 	inv_entry 0, BAD_IRQ
-ENDPROC(el0_irq_invalid)
+SYM_CODE_END(el0_irq_invalid)
 
-el0_fiq_invalid:
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
 	inv_entry 0, BAD_FIQ
-ENDPROC(el0_fiq_invalid)
+SYM_CODE_END(el0_fiq_invalid)
 
-el0_error_invalid:
+SYM_CODE_START_LOCAL(el0_error_invalid)
 	inv_entry 0, BAD_ERROR
-ENDPROC(el0_error_invalid)
+SYM_CODE_END(el0_error_invalid)
 
 #ifdef CONFIG_COMPAT
-el0_fiq_invalid_compat:
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
 	inv_entry 0, BAD_FIQ, 32
-ENDPROC(el0_fiq_invalid_compat)
+SYM_CODE_END(el0_fiq_invalid_compat)
 #endif
 
-el1_sync_invalid:
+SYM_CODE_START_LOCAL(el1_sync_invalid)
 	inv_entry 1, BAD_SYNC
-ENDPROC(el1_sync_invalid)
+SYM_CODE_END(el1_sync_invalid)
 
-el1_irq_invalid:
+SYM_CODE_START_LOCAL(el1_irq_invalid)
 	inv_entry 1, BAD_IRQ
-ENDPROC(el1_irq_invalid)
+SYM_CODE_END(el1_irq_invalid)
 
-el1_fiq_invalid:
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
 	inv_entry 1, BAD_FIQ
-ENDPROC(el1_fiq_invalid)
+SYM_CODE_END(el1_fiq_invalid)
 
-el1_error_invalid:
+SYM_CODE_START_LOCAL(el1_error_invalid)
 	inv_entry 1, BAD_ERROR
-ENDPROC(el1_error_invalid)
+SYM_CODE_END(el1_error_invalid)
 
 /*
  * EL1 mode handlers.
  */
 	.align	6
-el1_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
 	kernel_entry 1
 	mov	x0, sp
 	bl	el1_sync_handler
 	kernel_exit 1
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 	.align	6
-el1_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 	kernel_entry 1
 	gic_prio_irq_setup pmr=x20, tmp=x1
 	enable_da_f
@@ -639,42 +639,42 @@ alternative_else_nop_endif
 #endif
 
 	kernel_exit 1
-ENDPROC(el1_irq)
+SYM_CODE_END(el1_irq)
 
 /*
  * EL0 mode handlers.
  */
 	.align	6
-el0_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
 	kernel_entry 0
 	mov	x0, sp
 	bl	el0_sync_handler
 	b	ret_to_user
-ENDPROC(el0_sync)
+SYM_CODE_END(el0_sync)
 
 #ifdef CONFIG_COMPAT
 	.align	6
-el0_sync_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
 	kernel_entry 0, 32
 	mov	x0, sp
 	bl	el0_sync_compat_handler
 	b	ret_to_user
-ENDPROC(el0_sync_compat)
+SYM_CODE_END(el0_sync_compat)
 
 	.align	6
-el0_irq_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
 	kernel_entry 0, 32
 	b	el0_irq_naked
-ENDPROC(el0_irq_compat)
+SYM_CODE_END(el0_irq_compat)
 
-el0_error_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
 	kernel_entry 0, 32
 	b	el0_error_naked
-ENDPROC(el0_error_compat)
+SYM_CODE_END(el0_error_compat)
 #endif
 
 	.align	6
-el0_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 	kernel_entry 0
 el0_irq_naked:
 	gic_prio_irq_setup pmr=x20, tmp=x0
@@ -696,9 +696,9 @@ el0_irq_naked:
 	bl	trace_hardirqs_on
 #endif
 	b	ret_to_user
-ENDPROC(el0_irq)
+SYM_CODE_END(el0_irq)
 
-el1_error:
+SYM_CODE_START_LOCAL(el1_error)
 	kernel_entry 1
 	mrs	x1, esr_el1
 	gic_prio_kentry_setup tmp=x2
@@ -706,9 +706,9 @@ el1_error:
 	mov	x0, sp
 	bl	do_serror
 	kernel_exit 1
-ENDPROC(el1_error)
+SYM_CODE_END(el1_error)
 
-el0_error:
+SYM_CODE_START_LOCAL(el0_error)
 	kernel_entry 0
 el0_error_naked:
 	mrs	x25, esr_el1
@@ -720,7 +720,7 @@ el0_error_naked:
 	bl	do_serror
 	enable_da_f
 	b	ret_to_user
-ENDPROC(el0_error)
+SYM_CODE_END(el0_error)
 
 /*
  * Ok, we need to do extra processing, enter the slow path.