Commit 8695c37d authored by David S. Miller

sparc: Convert some assembler over to linkage.h's ENTRY/ENDPROC

Use those, instead of doing it all by hand.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b55e81b9
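
For context, a minimal before/after sketch of the pattern this commit applies (the symbol name my_func is hypothetical, not taken from the patch): the hand-written .globl/.type/label/.size sequence is replaced by the ENTRY()/ENDPROC() macros from include/linux/linkage.h, which expand to roughly the same directives (ENTRY emits the .globl, alignment and label; ENDPROC emits the .type and .size).

	/* Open-coded by hand (before): */
		.globl	my_func
		.type	my_func,#function
	my_func:
		retl
		 nop
		.size	my_func, .-my_func

	/* Using the linkage.h macros (after): */
	#include <linux/linkage.h>

	ENTRY(my_func)
		retl
		 nop
	ENDPROC(my_func)
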
@@ -5,10 +5,10 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
+#include <linux/linkage.h>
 	.text
-	.align 4
-	.globl __ashldi3
-__ashldi3:
+ENTRY(__ashldi3)
 	cmp %o2, 0
 	be 9f
 	mov 0x20, %g2
@@ -32,3 +32,4 @@ __ashldi3:
 9:
 	retl
 	nop
+ENDPROC(__ashldi3)
@@ -5,10 +5,10 @@
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  */
+#include <linux/linkage.h>
 	.text
-	.align 4
-	.globl __ashrdi3
-__ashrdi3:
+ENTRY(__ashrdi3)
 	tst %o2
 	be 3f
 	or %g0, 32, %g2
@@ -34,3 +34,4 @@ __ashrdi3:
 3:
 	retl
 	nop
+ENDPROC(__ashrdi3)
@@ -3,6 +3,7 @@
  * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  */
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/backoff.h>
@@ -13,9 +14,7 @@
  * memory barriers, and a second which returns
  * a value and does the barriers.
  */
-	.globl atomic_add
-	.type atomic_add,#function
-atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw [%o1], %g1
 	add %g1, %o0, %g7
@@ -26,11 +25,9 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic_add, .-atomic_add
+ENDPROC(atomic_add)
-	.globl atomic_sub
-	.type atomic_sub,#function
-atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw [%o1], %g1
 	sub %g1, %o0, %g7
@@ -41,11 +38,9 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic_sub, .-atomic_sub
+ENDPROC(atomic_sub)
-	.globl atomic_add_ret
-	.type atomic_add_ret,#function
-atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw [%o1], %g1
 	add %g1, %o0, %g7
@@ -56,11 +51,9 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
 	retl
 	sra %g1, 0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic_add_ret, .-atomic_add_ret
+ENDPROC(atomic_add_ret)
-	.globl atomic_sub_ret
-	.type atomic_sub_ret,#function
-atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw [%o1], %g1
 	sub %g1, %o0, %g7
@@ -71,11 +64,9 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
 	retl
 	sra %g1, 0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic_sub_ret, .-atomic_sub_ret
+ENDPROC(atomic_sub_ret)
-	.globl atomic64_add
-	.type atomic64_add,#function
-atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx [%o1], %g1
 	add %g1, %o0, %g7
@@ -86,11 +77,9 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic64_add, .-atomic64_add
+ENDPROC(atomic64_add)
-	.globl atomic64_sub
-	.type atomic64_sub,#function
-atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx [%o1], %g1
 	sub %g1, %o0, %g7
@@ -101,11 +90,9 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic64_sub, .-atomic64_sub
+ENDPROC(atomic64_sub)
-	.globl atomic64_add_ret
-	.type atomic64_add_ret,#function
-atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx [%o1], %g1
 	add %g1, %o0, %g7
@@ -116,11 +103,9 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
 	retl
 	add %g1, %o0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic64_add_ret, .-atomic64_add_ret
+ENDPROC(atomic64_add_ret)
-	.globl atomic64_sub_ret
-	.type atomic64_sub_ret,#function
-atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx [%o1], %g1
 	sub %g1, %o0, %g7
@@ -131,4 +116,4 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
 	retl
 	sub %g1, %o0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size atomic64_sub_ret, .-atomic64_sub_ret
+ENDPROC(atomic64_sub_ret)
@@ -3,14 +3,13 @@
  * Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
  */
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/backoff.h>
 	.text
-	.globl test_and_set_bit
-	.type test_and_set_bit,#function
-test_and_set_bit: /* %o0=nr, %o1=addr */
+ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx %o0, 6, %g1
 	mov 1, %o2
@@ -29,11 +28,9 @@ test_and_set_bit: /* %o0=nr, %o1=addr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size test_and_set_bit, .-test_and_set_bit
+ENDPROC(test_and_set_bit)
-	.globl test_and_clear_bit
-	.type test_and_clear_bit,#function
-test_and_clear_bit: /* %o0=nr, %o1=addr */
+ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx %o0, 6, %g1
 	mov 1, %o2
@@ -52,11 +49,9 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size test_and_clear_bit, .-test_and_clear_bit
+ENDPROC(test_and_clear_bit)
-	.globl test_and_change_bit
-	.type test_and_change_bit,#function
-test_and_change_bit: /* %o0=nr, %o1=addr */
+ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx %o0, 6, %g1
 	mov 1, %o2
@@ -75,11 +70,9 @@ test_and_change_bit: /* %o0=nr, %o1=addr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size test_and_change_bit, .-test_and_change_bit
+ENDPROC(test_and_change_bit)
-	.globl set_bit
-	.type set_bit,#function
-set_bit: /* %o0=nr, %o1=addr */
+ENTRY(set_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx %o0, 6, %g1
 	mov 1, %o2
@@ -96,11 +89,9 @@ set_bit: /* %o0=nr, %o1=addr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size set_bit, .-set_bit
+ENDPROC(set_bit)
-	.globl clear_bit
-	.type clear_bit,#function
-clear_bit: /* %o0=nr, %o1=addr */
+ENTRY(clear_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx %o0, 6, %g1
 	mov 1, %o2
@@ -117,11 +108,9 @@ clear_bit: /* %o0=nr, %o1=addr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size clear_bit, .-clear_bit
+ENDPROC(clear_bit)
-	.globl change_bit
-	.type change_bit,#function
-change_bit: /* %o0=nr, %o1=addr */
+ENTRY(change_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx %o0, 6, %g1
 	mov 1, %o2
@@ -138,4 +127,4 @@ change_bit: /* %o0=nr, %o1=addr */
 	retl
 	nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size change_bit, .-change_bit
+ENDPROC(change_bit)
@@ -4,6 +4,7 @@
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  */
+#include <linux/linkage.h>
 #include <asm/page.h>
 /* Zero out 64 bytes of memory at (buf + offset).
@@ -44,10 +45,7 @@
  */
 	.text
-	.align 4
-	.globl bzero_1page, __copy_1page
-bzero_1page:
+ENTRY(bzero_1page)
 /* NOTE: If you change the number of insns of this routine, please check
  * arch/sparc/mm/hypersparc.S */
 	/* %o0 = buf */
@@ -65,8 +63,9 @@ bzero_1page:
 	retl
 	nop
+ENDPROC(bzero_1page)
-__copy_1page:
+ENTRY(__copy_1page)
 /* NOTE: If you change the number of insns of this routine, please check
  * arch/sparc/mm/hypersparc.S */
 	/* %o0 = dst, %o1 = src */
@@ -87,3 +86,4 @@ __copy_1page:
 	retl
 	nop
+ENDPROC(__copy_1page)
@@ -4,11 +4,11 @@
  * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
  */
+#include <linux/linkage.h>
 	.text
-	.globl memset
-	.type memset, #function
-memset: /* %o0=buf, %o1=pat, %o2=len */
+ENTRY(memset) /* %o0=buf, %o1=pat, %o2=len */
 	and %o1, 0xff, %o3
 	mov %o2, %o1
 	sllx %o3, 8, %g1
@@ -19,9 +19,7 @@ memset: /* %o0=buf, %o1=pat, %o2=len */
 	ba,pt %xcc, 1f
 	or %g1, %o2, %o2
-	.globl __bzero
-	.type __bzero, #function
-__bzero: /* %o0=buf, %o1=len */
+ENTRY(__bzero) /* %o0=buf, %o1=len */
 	clr %o2
 1:	mov %o0, %o3
 	brz,pn %o1, __bzero_done
@@ -78,8 +76,8 @@ __bzero_tiny:
 __bzero_done:
 	retl
 	mov %o3, %o0
-	.size __bzero, .-__bzero
-	.size memset, .-memset
+ENDPROC(__bzero)
+ENDPROC(memset)
 #define EX_ST(x,y) \
 98:	x,y; \
@@ -89,9 +87,7 @@ __bzero_done:
 	.text; \
 	.align 4;
-	.globl __clear_user
-	.type __clear_user, #function
-__clear_user: /* %o0=buf, %o1=len */
+ENTRY(__clear_user) /* %o0=buf, %o1=len */
 	brz,pn %o1, __clear_user_done
 	cmp %o1, 16
 	bl,pn %icc, __clear_user_tiny
@@ -146,4 +142,4 @@ __clear_user_tiny:
 __clear_user_done:
 	retl
 	clr %o0
-	.size __clear_user, .-__clear_user
+ENDPROC(__clear_user)
+#include <linux/linkage.h>
 	.text
-	.align 32
-	.globl ip_fast_csum
-	.type ip_fast_csum,#function
-ip_fast_csum: /* %o0 = iph, %o1 = ihl */
+ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
 	sub %o1, 4, %g7
 	lduw [%o0 + 0x00], %o2
 	lduw [%o0 + 0x04], %g2
@@ -31,4 +30,4 @@ ip_fast_csum: /* %o0 = iph, %o1 = ihl */
 	set 0xffff, %o1
 	retl
 	and %o2, %o1, %o0
-	.size ip_fast_csum, .-ip_fast_csum
+ENDPROC(ip_fast_csum)
+#include <linux/linkage.h>
-	.globl __lshrdi3
-__lshrdi3:
+ENTRY(__lshrdi3)
 	cmp %o2, 0
 	be 3f
 	mov 0x20, %g2
@@ -24,3 +24,4 @@ __lshrdi3:
 3:
 	retl
 	nop
+ENDPROC(__lshrdi3)
@@ -4,11 +4,10 @@
  * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */
+#include <linux/linkage.h>
 	.text
-	.align 32
-	.globl memmove
-	.type memmove,#function
-memmove: /* o0=dst o1=src o2=len */
+ENTRY(memmove) /* o0=dst o1=src o2=len */
 	mov %o0, %g1
 	cmp %o0, %o1
 	bleu,pt %xcc, memcpy
@@ -28,4 +27,4 @@ memmove: /* o0=dst o1=src o2=len */
 	retl
 	mov %g1, %o0
-	.size memmove, .-memmove
+ENDPROC(memmove)
@@ -8,16 +8,16 @@
  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #define LO_MAGIC 0x01010101
 #define HI_MAGIC 0x80808080
 	.align 4
-	.global __strlen_user, __strnlen_user
-__strlen_user:
+ENTRY(__strlen_user)
 	sethi %hi(32768), %o1
-__strnlen_user:
+ENTRY(__strnlen_user)
 	mov %o1, %g1
 	mov %o0, %o1
 	andcc %o0, 3, %g0
@@ -78,6 +78,8 @@ __strnlen_user:
 	mov 2, %o0
 23:	retl
 	mov 3, %o0
+ENDPROC(__strlen_user)
+ENDPROC(__strnlen_user)
 	.section .fixup,#alloc,#execinstr
 	.align 4
......
@@ -3,10 +3,10 @@
  * generic strncmp routine.
  */
+#include <linux/linkage.h>
 	.text
-	.align 4
-	.global strncmp
-strncmp:
+ENTRY(strncmp)
 	mov %o0, %g3
 	mov 0, %o3
@@ -115,3 +115,4 @@ strncmp:
 	and %g2, 0xff, %o0
 	retl
 	sub %o3, %o0, %o0
+ENDPROC(strncmp)
@@ -4,13 +4,11 @@
  * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
+#include <linux/linkage.h>
 #include <asm/asi.h>
 	.text
-	.align 32
-	.globl strncmp
-	.type strncmp,#function
-strncmp:
+ENTRY(strncmp)
 	brlez,pn %o2, 3f
 	lduba [%o0] (ASI_PNF), %o3
 1:
@@ -29,4 +27,4 @@ strncmp:
 3:
 	retl
 	clr %o0
-	.size strncmp, .-strncmp
+ENDPROC(strncmp)
@@ -3,11 +3,11 @@
  * Copyright(C) 1996 David S. Miller
  */
+#include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/errno.h>
 	.text
-	.align 4
 /* Must return:
  *
@@ -16,8 +16,7 @@
  * bytes copied if we hit a null byte
  */
-	.globl __strncpy_from_user
-__strncpy_from_user:
+ENTRY(__strncpy_from_user)
 	/* %o0=dest, %o1=src, %o2=count */
 	mov %o2, %o3
 1:
@@ -35,6 +34,7 @@ __strncpy_from_user:
 	add %o2, 1, %o0
 	retl
 	sub %o3, %o0, %o0
+ENDPROC(__strncpy_from_user)
 	.section .fixup,#alloc,#execinstr
 	.align 4
......
@@ -4,6 +4,7 @@
  * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/errno.h>
@@ -12,7 +13,6 @@
 0:	.xword 0x0101010101010101
 	.text
-	.align 32
 /* Must return:
  *
@@ -30,9 +30,7 @@
  * and average length is 18 or so.
  */
-	.globl __strncpy_from_user
-	.type __strncpy_from_user,#function
-__strncpy_from_user:
+ENTRY(__strncpy_from_user)
 	/* %o0=dest, %o1=src, %o2=count */
 	andcc %o1, 7, %g0 ! IEU1 Group
 	bne,pn %icc, 30f ! CTI
@@ -123,7 +121,7 @@ __strncpy_from_user:
 	mov %o2, %o0
 2:	retl
 	add %o2, %o3, %o0
-	.size __strncpy_from_user, .-__strncpy_from_user
+ENDPROC(__strncpy_from_user)
 	.section __ex_table,"a"
 	.align 4
......
@@ -8,6 +8,7 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
+#include <linux/linkage.h>
 #include <asm/visasm.h>
 #include <asm/asi.h>
 #include <asm/dcu.h>
@@ -19,12 +20,9 @@
  * !(len & 127) && len >= 256
  */
 	.text
-	.align 32
 	/* VIS versions. */
-	.globl xor_vis_2
-	.type xor_vis_2,#function
-xor_vis_2:
+ENTRY(xor_vis_2)
 	rd %fprs, %o5
 	andcc %o5, FPRS_FEF|FPRS_DU, %g0
 	be,pt %icc, 0f
@@ -91,11 +89,9 @@ xor_vis_2:
 	wr %g1, %g0, %asi
 	retl
 	wr %g0, 0, %fprs
-	.size xor_vis_2, .-xor_vis_2
+ENDPROC(xor_vis_2)
-	.globl xor_vis_3
-	.type xor_vis_3,#function
-xor_vis_3:
+ENTRY(xor_vis_3)
 	rd %fprs, %o5
 	andcc %o5, FPRS_FEF|FPRS_DU, %g0
 	be,pt %icc, 0f
@@ -159,11 +155,9 @@ xor_vis_3:
 	wr %g1, %g0, %asi
 	retl
 	wr %g0, 0, %fprs
-	.size xor_vis_3, .-xor_vis_3
+ENDPROC(xor_vis_3)
-	.globl xor_vis_4
-	.type xor_vis_4,#function
-xor_vis_4:
+ENTRY(xor_vis_4)
 	rd %fprs, %o5
 	andcc %o5, FPRS_FEF|FPRS_DU, %g0
 	be,pt %icc, 0f
@@ -246,11 +240,9 @@ xor_vis_4:
 	wr %g1, %g0, %asi
 	retl
 	wr %g0, 0, %fprs
-	.size xor_vis_4, .-xor_vis_4
+ENDPROC(xor_vis_4)
-	.globl xor_vis_5
-	.type xor_vis_5,#function
-xor_vis_5:
+ENTRY(xor_vis_5)
 	save %sp, -192, %sp
 	rd %fprs, %o5
 	andcc %o5, FPRS_FEF|FPRS_DU, %g0
@@ -354,12 +346,10 @@ xor_vis_5:
 	wr %g0, 0, %fprs
 	ret
 	restore
-	.size xor_vis_5, .-xor_vis_5
+ENDPROC(xor_vis_5)
 	/* Niagara versions. */
-	.globl xor_niagara_2
-	.type xor_niagara_2,#function
-xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */
+ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
 	save %sp, -192, %sp
 	prefetch [%i1], #n_writes
 	prefetch [%i2], #one_read
@@ -402,11 +392,9 @@ xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */
 	wr %g7, 0x0, %asi
 	ret
 	restore
-	.size xor_niagara_2, .-xor_niagara_2
+ENDPROC(xor_niagara_2)
-	.globl xor_niagara_3
-	.type xor_niagara_3,#function
-xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
+ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
 	save %sp, -192, %sp
 	prefetch [%i1], #n_writes
 	prefetch [%i2], #one_read
@@ -465,11 +453,9 @@ xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
 	wr %g7, 0x0, %asi
 	ret
 	restore
-	.size xor_niagara_3, .-xor_niagara_3
+ENDPROC(xor_niagara_3)
-	.globl xor_niagara_4
-	.type xor_niagara_4,#function
-xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
+ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
 	save %sp, -192, %sp
 	prefetch [%i1], #n_writes
 	prefetch [%i2], #one_read
@@ -549,11 +535,9 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
 	wr %g7, 0x0, %asi
 	ret
 	restore
-	.size xor_niagara_4, .-xor_niagara_4
+ENDPROC(xor_niagara_4)
-	.globl xor_niagara_5
-	.type xor_niagara_5,#function
-xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
+ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
 	save %sp, -192, %sp
 	prefetch [%i1], #n_writes
 	prefetch [%i2], #one_read
@@ -649,4 +633,4 @@ xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 *
 	wr %g7, 0x0, %asi
 	ret
 	restore
-	.size xor_niagara_5, .-xor_niagara_5
+ENDPROC(xor_niagara_5)