Commit d8381a1d authored by David S. Miller

[SPARC64]: Add .type and .size directives to some asm files.

Signed-off-by: David S. Miller <davem@redhat.com>
parent 1914ba52
...@@ -117,6 +117,7 @@ ...@@ -117,6 +117,7 @@
.align 64 .align 64
.globl FUNC_NAME .globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
PREAMBLE PREAMBLE
mov %o0, %g5 mov %o0, %g5
...@@ -550,3 +551,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ...@@ -550,3 +551,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o1, 1, %o1 add %o1, 1, %o1
retl retl
mov EX_RETVAL(%g5), %o0 mov EX_RETVAL(%g5), %o0
.size FUNC_NAME, .-FUNC_NAME
...@@ -78,6 +78,7 @@ ...@@ -78,6 +78,7 @@
*/ */
.globl FUNC_NAME .globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
PREAMBLE PREAMBLE
mov %o0, %g5 mov %o0, %g5
...@@ -412,3 +413,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ...@@ -412,3 +413,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o1, 1, %o1 add %o1, 1, %o1
retl retl
mov EX_RETVAL(%g5), %o0 mov EX_RETVAL(%g5), %o0
.size FUNC_NAME, .-FUNC_NAME
...@@ -22,9 +22,11 @@ ...@@ -22,9 +22,11 @@
flush %g2; flush %g2;
.globl cheetah_patch_copyops .globl cheetah_patch_copyops
.type cheetah_patch_copyops,#function
cheetah_patch_copyops: cheetah_patch_copyops:
ULTRA3_DO_PATCH(memcpy, U3memcpy) ULTRA3_DO_PATCH(memcpy, U3memcpy)
ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user) ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user) ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
retl retl
nop nop
.size cheetah_patch_copyops,.-cheetah_patch_copyops
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
.align 64 .align 64
.globl __atomic_add .globl __atomic_add
.type __atomic_add,#function
__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
lduw [%o1], %g5 lduw [%o1], %g5
add %g5, %o0, %g7 add %g5, %o0, %g7
...@@ -19,8 +20,10 @@ __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ ...@@ -19,8 +20,10 @@ __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
retl retl
add %g7, %o0, %o0 add %g7, %o0, %o0
.size __atomic_add, .-__atomic_add
.globl __atomic_sub .globl __atomic_sub
.type __atomic_sub,#function
__atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
lduw [%o1], %g5 lduw [%o1], %g5
sub %g5, %o0, %g7 sub %g5, %o0, %g7
...@@ -30,8 +33,10 @@ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */ ...@@ -30,8 +33,10 @@ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
retl retl
sub %g7, %o0, %o0 sub %g7, %o0, %o0
.size __atomic_sub, .-__atomic_sub
.globl __atomic64_add .globl __atomic64_add
.type __atomic64_add,#function
__atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ __atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
ldx [%o1], %g5 ldx [%o1], %g5
add %g5, %o0, %g7 add %g5, %o0, %g7
...@@ -41,8 +46,10 @@ __atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ ...@@ -41,8 +46,10 @@ __atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
retl retl
add %g7, %o0, %o0 add %g7, %o0, %o0
.size __atomic64_add, .-__atomic64_add
.globl __atomic64_sub .globl __atomic64_sub
.type __atomic64_sub,#function
__atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */ __atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
ldx [%o1], %g5 ldx [%o1], %g5
sub %g5, %o0, %g7 sub %g5, %o0, %g7
...@@ -52,4 +59,4 @@ __atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */ ...@@ -52,4 +59,4 @@ __atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
retl retl
sub %g7, %o0, %o0 sub %g7, %o0, %o0
.size __atomic64_sub, .-__atomic64_sub
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
.text .text
.align 64 .align 64
.globl ___test_and_set_bit .globl ___test_and_set_bit
.type ___test_and_set_bit,#function
___test_and_set_bit: /* %o0=nr, %o1=addr */ ___test_and_set_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1 srlx %o0, 6, %g1
mov 1, %g5 mov 1, %g5
...@@ -26,8 +27,10 @@ ___test_and_set_bit: /* %o0=nr, %o1=addr */ ...@@ -26,8 +27,10 @@ ___test_and_set_bit: /* %o0=nr, %o1=addr */
ldx [%o1], %g7 ldx [%o1], %g7
2: retl 2: retl
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
.size ___test_and_set_bit, .-___test_and_set_bit
.globl ___test_and_clear_bit .globl ___test_and_clear_bit
.type ___test_and_clear_bit,#function
___test_and_clear_bit: /* %o0=nr, %o1=addr */ ___test_and_clear_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1 srlx %o0, 6, %g1
mov 1, %g5 mov 1, %g5
...@@ -45,8 +48,10 @@ ___test_and_clear_bit: /* %o0=nr, %o1=addr */ ...@@ -45,8 +48,10 @@ ___test_and_clear_bit: /* %o0=nr, %o1=addr */
ldx [%o1], %g7 ldx [%o1], %g7
2: retl 2: retl
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
.size ___test_and_clear_bit, .-___test_and_clear_bit
.globl ___test_and_change_bit .globl ___test_and_change_bit
.type ___test_and_change_bit,#function
___test_and_change_bit: /* %o0=nr, %o1=addr */ ___test_and_change_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1 srlx %o0, 6, %g1
mov 1, %g5 mov 1, %g5
...@@ -64,3 +69,4 @@ ___test_and_change_bit: /* %o0=nr, %o1=addr */ ...@@ -64,3 +69,4 @@ ___test_and_change_bit: /* %o0=nr, %o1=addr */
2: retl 2: retl
membar #StoreLoad | #StoreStore membar #StoreLoad | #StoreStore
nop nop
.size ___test_and_change_bit, .-___test_and_change_bit
...@@ -31,7 +31,8 @@ ...@@ -31,7 +31,8 @@
* to copy register windows around during thread cloning. * to copy register windows around during thread cloning.
*/ */
.globl ___copy_in_user .globl ___copy_in_user
.type ___copy_in_user,#function
___copy_in_user: /* %o0=dst, %o1=src, %o2=len */ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it. /* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively * Reading %asi to check for KERNEL_DS is comparatively
...@@ -99,11 +100,14 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */ ...@@ -99,11 +100,14 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
retl retl
clr %o0 clr %o0
.size ___copy_in_user, .-___copy_in_user
/* Act like copy_{to,in}_user(), ie. return zero instead /* Act like copy_{to,in}_user(), ie. return zero instead
* of original destination pointer. This is invoked when * of original destination pointer. This is invoked when
* copy_{to,in}_user() finds that %asi is kernel space. * copy_{to,in}_user() finds that %asi is kernel space.
*/ */
.globl memcpy_user_stub .globl memcpy_user_stub
.type memcpy_user_stub,#function
memcpy_user_stub: memcpy_user_stub:
save %sp, -192, %sp save %sp, -192, %sp
mov %i0, %o0 mov %i0, %o0
...@@ -112,3 +116,4 @@ memcpy_user_stub: ...@@ -112,3 +116,4 @@ memcpy_user_stub:
mov %i2, %o2 mov %i2, %o2
ret ret
restore %g0, %g0, %o0 restore %g0, %g0, %o0
.size memcpy_user_stub, .-memcpy_user_stub
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
.align 32 .align 32
.globl copy_user_page .globl copy_user_page
.type copy_user_page,#function
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o4 lduw [%g6 + TI_PRE_COUNT], %o4
sethi %uhi(PAGE_OFFSET), %g2 sethi %uhi(PAGE_OFFSET), %g2
...@@ -237,3 +238,5 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ ...@@ -237,3 +238,5 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
retl retl
stw %o4, [%g6 + TI_PRE_COUNT] stw %o4, [%g6 + TI_PRE_COUNT]
.size copy_user_page, .-copy_user_page
.text .text
.align 32 .align 32
.globl ip_fast_csum .globl ip_fast_csum
.type ip_fast_csum,#function
ip_fast_csum: /* %o0 = iph, %o1 = ihl */ ip_fast_csum: /* %o0 = iph, %o1 = ihl */
sub %o1, 4, %g7 sub %o1, 4, %g7
lduw [%o0 + 0x00], %o2 lduw [%o0 + 0x00], %o2
...@@ -30,3 +31,4 @@ ip_fast_csum: /* %o0 = iph, %o1 = ihl */ ...@@ -30,3 +31,4 @@ ip_fast_csum: /* %o0 = iph, %o1 = ihl */
set 0xffff, %o1 set 0xffff, %o1
retl retl
and %o2, %o1, %o0 and %o2, %o1, %o0
.size ip_fast_csum, .-ip_fast_csum
...@@ -5,8 +5,9 @@ ...@@ -5,8 +5,9 @@
*/ */
.text .text
.align 32 .align 32
.globl memmove .globl memmove
.type memmove,#function
memmove: memmove:
mov %o0, %g1 mov %o0, %g1
cmp %o0, %o1 cmp %o0, %o1
...@@ -29,3 +30,4 @@ memmove: ...@@ -29,3 +30,4 @@ memmove:
retl retl
mov %g1, %o0 mov %g1, %o0
.size memmove, .-memmove
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
.align 64 .align 64
.globl _raw_spin_lock .globl _raw_spin_lock
.type _raw_spin_lock,#function
_raw_spin_lock: /* %o0 = lock_ptr */ _raw_spin_lock: /* %o0 = lock_ptr */
1: ldstub [%o0], %g7 1: ldstub [%o0], %g7
brnz,pn %g7, 2f brnz,pn %g7, 2f
...@@ -17,8 +18,10 @@ _raw_spin_lock: /* %o0 = lock_ptr */ ...@@ -17,8 +18,10 @@ _raw_spin_lock: /* %o0 = lock_ptr */
brnz,pt %g7, 2b brnz,pt %g7, 2b
membar #LoadLoad membar #LoadLoad
ba,a,pt %xcc, 1b ba,a,pt %xcc, 1b
.size _raw_spin_lock, .-_raw_spin_lock
.globl _raw_spin_lock_flags .globl _raw_spin_lock_flags
.type _raw_spin_lock_flags,#function
_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */ _raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
1: ldstub [%o0], %g7 1: ldstub [%o0], %g7
brnz,pn %g7, 2f brnz,pn %g7, 2f
...@@ -33,3 +36,4 @@ _raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */ ...@@ -33,3 +36,4 @@ _raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
membar #LoadLoad membar #LoadLoad
ba,pt %xcc, 1b ! Retry lock acquire ba,pt %xcc, 1b ! Retry lock acquire
wrpr %g2, %pil ! Restore PIL wrpr %g2, %pil ! Restore PIL
.size _raw_spin_lock_flags, .-_raw_spin_lock_flags
...@@ -9,7 +9,8 @@ ...@@ -9,7 +9,8 @@
#define HI_MAGIC 0x80808080 #define HI_MAGIC 0x80808080
.align 32 .align 32
.global strlen .globl strlen
.type strlen,#function
strlen: strlen:
mov %o0, %o1 mov %o0, %o1
andcc %o0, 3, %g0 andcc %o0, 3, %g0
...@@ -75,3 +76,5 @@ strlen: ...@@ -75,3 +76,5 @@ strlen:
13: 13:
retl retl
mov 2, %o0 mov 2, %o0
.size strlen, .-strlen
...@@ -7,8 +7,9 @@ ...@@ -7,8 +7,9 @@
#include <asm/asi.h> #include <asm/asi.h>
.text .text
.align 4 .align 32
.global strncmp .globl strncmp
.type strncmp,#function
strncmp: strncmp:
brlez,pn %o2, 3f brlez,pn %o2, 3f
lduba [%o0] (ASI_PNF), %o3 lduba [%o0] (ASI_PNF), %o3
...@@ -28,3 +29,4 @@ strncmp: ...@@ -28,3 +29,4 @@ strncmp:
3: 3:
retl retl
clr %o0 clr %o0
.size strncmp, .-strncmp
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
0: .xword 0x0101010101010101 0: .xword 0x0101010101010101
.text .text
.align 4 .align 32
/* Must return: /* Must return:
* *
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
*/ */
.globl __strncpy_from_user .globl __strncpy_from_user
.type __strncpy_from_user,#function
__strncpy_from_user: __strncpy_from_user:
/* %o0=dest, %o1=src, %o2=count */ /* %o0=dest, %o1=src, %o2=count */
sethi %hi(0b), %o5 ! IEU0 Group sethi %hi(0b), %o5 ! IEU0 Group
...@@ -122,6 +123,7 @@ __strncpy_from_user: ...@@ -122,6 +123,7 @@ __strncpy_from_user:
mov %o2, %o0 mov %o2, %o0
2: retl 2: retl
add %o2, %o3, %o0 add %o2, %o3, %o0
.size __strncpy_from_user, .-__strncpy_from_user
.section .fixup,#alloc,#execinstr .section .fixup,#alloc,#execinstr
.align 4 .align 4
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment