Commit 20791846 authored by Ard Biesheuvel, committed by Catalin Marinas

arm64: use ENDPIPROC() to annotate position independent assembler routines

For more control over which functions are called with the MMU off or
with the UEFI 1:1 mapping active, annotate some assembler routines as
position independent. This is done by introducing ENDPIPROC(), which
replaces the ENDPROC() declaration of those routines.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent d4dddfdb
...@@ -193,4 +193,15 @@ lr .req x30 // link register ...@@ -193,4 +193,15 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym] str \src, [\tmp, :lo12:\sym]
.endm .endm
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 *
 * Expands to the usual ENDPROC(x) annotation for the routine itself, and in
 * addition emits a global __pi_<x> function symbol that is set to the same
 * address and size as <x>.  Callers that must run with the MMU off or under
 * the UEFI 1:1 mapping can reference the __pi_ alias to make the position
 * independence requirement explicit at the call site.
 */
#define ENDPIPROC(x) \
.globl __pi_##x; \
.type __pi_##x, %function; \
.set __pi_##x, x; \
.size __pi_##x, . - x; \
ENDPROC(x)
#endif /* __ASM_ASSEMBLER_H */ #endif /* __ASM_ASSEMBLER_H */
...@@ -41,4 +41,4 @@ ENTRY(memchr) ...@@ -41,4 +41,4 @@ ENTRY(memchr)
ret ret
2: mov x0, #0 2: mov x0, #0
ret ret
ENDPROC(memchr) ENDPIPROC(memchr)
...@@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 ) ...@@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 )
.Lret0: .Lret0:
mov result, #0 mov result, #0
ret ret
ENDPROC(memcmp) ENDPIPROC(memcmp)
...@@ -71,4 +71,4 @@ ...@@ -71,4 +71,4 @@
ENTRY(memcpy) ENTRY(memcpy)
#include "copy_template.S" #include "copy_template.S"
ret ret
ENDPROC(memcpy) ENDPIPROC(memcpy)
...@@ -194,4 +194,4 @@ ENTRY(memmove) ...@@ -194,4 +194,4 @@ ENTRY(memmove)
tst count, #0x3f tst count, #0x3f
b.ne .Ltail63 b.ne .Ltail63
ret ret
ENDPROC(memmove) ENDPIPROC(memmove)
...@@ -213,4 +213,4 @@ ENTRY(memset) ...@@ -213,4 +213,4 @@ ENTRY(memset)
ands count, count, zva_bits_x ands count, count, zva_bits_x
b.ne .Ltail_maybe_long b.ne .Ltail_maybe_long
ret ret
ENDPROC(memset) ENDPIPROC(memset)
...@@ -231,4 +231,4 @@ CPU_BE( orr syndrome, diff, has_nul ) ...@@ -231,4 +231,4 @@ CPU_BE( orr syndrome, diff, has_nul )
lsr data1, data1, #56 lsr data1, data1, #56
sub result, data1, data2, lsr #56 sub result, data1, data2, lsr #56
ret ret
ENDPROC(strcmp) ENDPIPROC(strcmp)
...@@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ ...@@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
csinv data1, data1, xzr, le csinv data1, data1, xzr, le
csel data2, data2, data2a, le csel data2, data2, data2a, le
b .Lrealigned b .Lrealigned
ENDPROC(strlen) ENDPIPROC(strlen)
...@@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul ) ...@@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul )
.Lret0: .Lret0:
mov result, #0 mov result, #0
ret ret
ENDPROC(strncmp) ENDPIPROC(strncmp)
...@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area) ...@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
b.lo 1b b.lo 1b
dsb sy dsb sy
ret ret
ENDPROC(__flush_dcache_area) ENDPIPROC(__flush_dcache_area)
/* /*
* __inval_cache_range(start, end) * __inval_cache_range(start, end)
...@@ -131,7 +131,7 @@ __dma_inv_range: ...@@ -131,7 +131,7 @@ __dma_inv_range:
b.lo 2b b.lo 2b
dsb sy dsb sy
ret ret
ENDPROC(__inval_cache_range) ENDPIPROC(__inval_cache_range)
ENDPROC(__dma_inv_range) ENDPROC(__dma_inv_range)
/* /*
...@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range) ...@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
b.lo 1b b.lo 1b
dsb sy dsb sy
ret ret
ENDPROC(__dma_flush_range) ENDPIPROC(__dma_flush_range)
/* /*
* __dma_map_area(start, size, dir) * __dma_map_area(start, size, dir)
...@@ -184,7 +184,7 @@ ENTRY(__dma_map_area) ...@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
cmp w2, #DMA_FROM_DEVICE cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_range b.eq __dma_inv_range
b __dma_clean_range b __dma_clean_range
ENDPROC(__dma_map_area) ENDPIPROC(__dma_map_area)
/* /*
* __dma_unmap_area(start, size, dir) * __dma_unmap_area(start, size, dir)
...@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area) ...@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
cmp w2, #DMA_TO_DEVICE cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_range b.ne __dma_inv_range
ret ret
ENDPROC(__dma_unmap_area) ENDPIPROC(__dma_unmap_area)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment