Commit fa5dc772 authored by David S. Miller

Merge branch 'sparc64-M7-memcpy'

Babu Moger says:

====================
sparc64: Update memcpy, memset etc. for M7/M8 architectures

This series of patches updates memcpy, memset, copy_to_user, copy_from_user,
etc. for the SPARC M7/M8 architectures.

The new algorithm takes advantage of the M7/M8 block-init store ASIs and is
considerably better optimized. More detail is in the code comments.
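
For a sense of the mechanism, a block-init memset loop looks roughly like the
sketch below. This is illustrative only, not the inner loop of M7memcpy or
M7memset; register usage is arbitrary, and ASI_BLK_INIT_QUAD_LDD_P is the
kernel's block-init ASI for the primary address space:

	! Sketch only: %o0 = 64-byte aligned destination,
	! %o1 = fill pattern, %o2 = byte count (multiple of 64).
	wr	%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
1:	stxa	%o1, [%o0 + 0x00] %asi	! the first store to a 64-byte
	stxa	%o1, [%o0 + 0x08] %asi	! line allocates it in the cache
	stxa	%o1, [%o0 + 0x10] %asi	! without fetching the old
	stxa	%o1, [%o0 + 0x18] %asi	! contents from memory
	stxa	%o1, [%o0 + 0x20] %asi
	stxa	%o1, [%o0 + 0x28] %asi
	stxa	%o1, [%o0 + 0x30] %asi
	stxa	%o1, [%o0 + 0x38] %asi
	subcc	%o2, 64, %o2
	bne,pt	%xcc, 1b
	 add	%o0, 64, %o0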

Tested and compared the latency measured in ticks (NG4memcpy vs. the new M7memcpy).
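
The measurement harness is not part of this series; presumably each tick count
comes from reading the %tick register around the call under test, roughly:

	rd	%tick, %o4	! before (sketch only; assumes dst/src/len
	call	M7memcpy	! are already loaded into %o0-%o2)
	 nop
	rd	%tick, %o5	! after
	sub	%o5, %o4, %o0	! elapsed ticks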

1. Memset numbers(Aligned memset)

No. of bytes  NG4memset	   M7memset    	Delta ((B-A)/A)*100
	     (Avg.Ticks A) (Avg.Ticks B) (latency reduction)
  3		77		25		-67.53
  7		43		33		-23.25
  32		72		68		 -5.55
  128		164		44		-73.17
  256		335		68		-79.70
  512		511		220		-56.94
  1024		1552		627		-59.60
  2048		3515		1322		-62.38
  4096		6303		2472		-60.78
  8192		13118		4867		-62.89
  16384		26206		10371		-60.42
  32768		52501		18569		-64.63
  65536		100219		35899		-64.17

2. Memcpy numbers(Aligned memcpy)

No. of bytes  NG4memcpy	   M7memcpy    	Delta ((B-A)/A)*100
	     (Avg.Ticks A) (Avg.Ticks B) (latency reduction)
  3		20		19		-5
  7		29		27		-6.89
  32		30		28		-6.66
  128		89		69		-22.47
  256		142		143		 0.70
  512		341		283		-17.00
  1024		1588		655		-58.75
  2048		3553		1357		-61.80
  4096		7218		2590		-64.11
  8192		13701		5231		-61.82
  16384		28304		10716		-62.13
  32768		56516		22995		-59.31
  65536		115443		50840		-55.96

3. Memset numbers(un-aligned memset)

No. of bytes  NG4memset	   M7memset    	Delta ((B-A)/A)*100
	     (Avg.Ticks A) (Avg.Ticks B) (latency reduction)
  3		40		31		-22.50
  7		52		29		-44.23
  32		89		86		 -3.37
  128		201		74		-63.18
  256		340		154		-54.71
  512		961		335		-65.14
  1024		1799		686		-61.87
  2048		3575		1260		-64.76
  4096		6560		2627		-59.95
  8192		13161		6018		-54.27
  16384		26465		10439		-60.56
  32768		52119		18649		-64.22
  65536		101593		35724		-64.84

4. Memcpy numbers(un-aligned memcpy)

No. of bytes  NG4memcpy	   M7memcpy    	Delta ((B-A)/A)*100
	     (Avg.Ticks A) (Avg.Ticks B) (latency reduction)
  3		26		19		-26.92
  7		48		45		 -6.25
  32		52		49		 -5.77
  128		284		334		 17.61
  256		430		482		 12.09
  512		646		690		  6.81
  1024		1051		1016		 -3.33
  2048		1787		1818		  1.73
  4096		3309		3376		  2.02
  8192		8151		7444		 -8.67
  16384		34222		34556		  0.98
  32768		87851		95044		  8.19
  65536		158331		159572		  0.78

There is not much difference in the un-aligned numbers between NG4memcpy
and M7memcpy because both mostly use the same algorithms.

v2:
 1. Fixed indentation issues found by David Miller.
 2. Used ENTRY and ENDPROC for the labels in M7patch.S, as suggested by
    David Miller.
 3. M8 now also uses M7memcpy; tested on an M8 config as well.
 4. These patches are created on top of the M8 patches below:
    https://patchwork.ozlabs.org/patch/792661/
    https://patchwork.ozlabs.org/patch/792662/
    However, I did not see those patches in the sparc-next tree; they may
    still be queued. Until all the M8 patches land in sparc-next, this
    series might cause some build problems.

v0: Initial version
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 061273f9 34060b8f
@@ -603,10 +603,10 @@ niagara_tlb_fixup:
 	be,pt	%xcc, niagara4_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_M7
-	be,pt	%xcc, niagara4_patch
+	be,pt	%xcc, sparc_m7_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_M8
-	be,pt	%xcc, niagara4_patch
+	be,pt	%xcc, sparc_m7_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_SN
 	be,pt	%xcc, niagara4_patch
@@ -621,6 +621,18 @@ niagara_tlb_fixup:
 	ba,a,pt	%xcc, 80f
 	 nop
+sparc_m7_patch:
+	call	m7_patch_copyops
+	 nop
+	call	m7_patch_bzero
+	 nop
+	call	m7_patch_pageops
+	 nop
+	ba,a,pt	%xcc, 80f
+	 nop
 niagara4_patch:
 	call	niagara4_patch_copyops
 	 nop
/*
* M7copy_from_user.S: SPARC M7 optimized copy from userspace.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
#define EX_LD(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y; \
.text; \
.align 4;
#define EX_LD_FP(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y##_fp; \
.text; \
.align 4;
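/* Each EX_LD()/EX_LD_FP() wraps a single load: the "98:" label marks
 * the instruction that may fault, and the __ex_table entry maps that
 * address to the fixup routine y (one of the memcpy_retl_* stubs in
 * Memcpy_utils.S), which computes how many bytes were left to copy
 * and returns that count. The _FP variant branches to the fixup's
 * _fp twin so the FPU state is unwound first.
 */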
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif
#define FUNC_NAME M7copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define EX_RETVAL(x) 0
#ifdef __KERNEL__
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, raw_copy_in_user; \
nop
#endif
#include "M7memcpy.S"
/*
* M7copy_to_user.S: SPARC M7 optimized copy to userspace.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
#define EX_ST(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y; \
.text; \
.align 4;
#define EX_ST_FP(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y##_fp; \
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif
#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS
#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23
#endif
#define FUNC_NAME M7copy_to_user
#define STORE(type,src,addr) type##a src, [addr] %asi
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS
#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_S
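/* ASI_ST_BLKINIT_MRU_S is the secondary-space block-init store ASI
 * that also marks the line most-recently-used; presumably chosen so
 * freshly written user data is less likely to be evicted before the
 * application reads it.
 */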
#define EX_RETVAL(x) 0
#ifdef __KERNEL__
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, raw_copy_in_user; \
nop
#endif
#include "M7memcpy.S"
/*
* M7patch.S: Patch generic routines with M7 variant.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
#include <linux/linkage.h>
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define NG_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
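/* NG_DO_PATCH computes the byte offset NEW - OLD, converts it to a
 * 19-bit word displacement (the sll 11/srl 13 pair drops the low two
 * bits and masks off the high bits), ORs it into the "ba,pt %xcc"
 * opcode (BRANCH_ALWAYS), and stores that branch plus a NOP delay
 * slot over the first two instructions of OLD, flushing the I-cache
 * so the redirect takes effect immediately.
 */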
ENTRY(m7_patch_copyops)
NG_DO_PATCH(memcpy, M7memcpy)
NG_DO_PATCH(raw_copy_from_user, M7copy_from_user)
NG_DO_PATCH(raw_copy_to_user, M7copy_to_user)
retl
nop
ENDPROC(m7_patch_copyops)
ENTRY(m7_patch_bzero)
NG_DO_PATCH(memset, M7memset)
NG_DO_PATCH(__bzero, M7bzero)
NG_DO_PATCH(__clear_user, NGclear_user)
NG_DO_PATCH(tsb_init, NGtsb_init)
retl
nop
ENDPROC(m7_patch_bzero)
ENTRY(m7_patch_pageops)
NG_DO_PATCH(copy_user_page, NG4copy_user_page)
NG_DO_PATCH(_clear_page, M7clear_page)
NG_DO_PATCH(clear_user_page, M7clear_user_page)
retl
nop
ENDPROC(m7_patch_pageops)
@@ -36,6 +36,11 @@ lib-$(CONFIG_SPARC64) += NG2patch.o
 lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o
 lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
+lib-$(CONFIG_SPARC64) += Memcpy_utils.o
+lib-$(CONFIG_SPARC64) += M7memcpy.o M7copy_from_user.o M7copy_to_user.o
+lib-$(CONFIG_SPARC64) += M7patch.o M7memset.o
 lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
 lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
#ifndef __ASM_MEMCPY_UTILS
#define __ASM_MEMCPY_UTILS
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/visasm.h>
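/* Common exception fixup targets for the copy routines: each
 * memcpy_retl_* stub reconstructs the not-yet-copied byte count in
 * %o0 from %o2 plus the constants/registers encoded in its name,
 * then restores the user-space %asi via __restore_asi. The _fp
 * variants additionally pop the FPU state via VISExitHalf.
 */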
ENTRY(__restore_asi_fp)
VISExitHalf
retl
wr %g0, ASI_AIUS, %asi
ENDPROC(__restore_asi_fp)
ENTRY(__restore_asi)
retl
wr %g0, ASI_AIUS, %asi
ENDPROC(__restore_asi)
ENTRY(memcpy_retl_o2)
ba,pt %xcc, __restore_asi
mov %o2, %o0
ENDPROC(memcpy_retl_o2)
ENTRY(memcpy_retl_o2_plus_1)
ba,pt %xcc, __restore_asi
add %o2, 1, %o0
ENDPROC(memcpy_retl_o2_plus_1)
ENTRY(memcpy_retl_o2_plus_3)
ba,pt %xcc, __restore_asi
add %o2, 3, %o0
ENDPROC(memcpy_retl_o2_plus_3)
ENTRY(memcpy_retl_o2_plus_4)
ba,pt %xcc, __restore_asi
add %o2, 4, %o0
ENDPROC(memcpy_retl_o2_plus_4)
ENTRY(memcpy_retl_o2_plus_5)
ba,pt %xcc, __restore_asi
add %o2, 5, %o0
ENDPROC(memcpy_retl_o2_plus_5)
ENTRY(memcpy_retl_o2_plus_6)
ba,pt %xcc, __restore_asi
add %o2, 6, %o0
ENDPROC(memcpy_retl_o2_plus_6)
ENTRY(memcpy_retl_o2_plus_7)
ba,pt %xcc, __restore_asi
add %o2, 7, %o0
ENDPROC(memcpy_retl_o2_plus_7)
ENTRY(memcpy_retl_o2_plus_8)
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_8)
ENTRY(memcpy_retl_o2_plus_15)
ba,pt %xcc, __restore_asi
add %o2, 15, %o0
ENDPROC(memcpy_retl_o2_plus_15)
ENTRY(memcpy_retl_o2_plus_15_8)
add %o2, 15, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_15_8)
ENTRY(memcpy_retl_o2_plus_16)
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_16)
ENTRY(memcpy_retl_o2_plus_24)
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_24)
ENTRY(memcpy_retl_o2_plus_31)
ba,pt %xcc, __restore_asi
add %o2, 31, %o0
ENDPROC(memcpy_retl_o2_plus_31)
ENTRY(memcpy_retl_o2_plus_32)
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_32)
ENTRY(memcpy_retl_o2_plus_31_32)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_31_32)
ENTRY(memcpy_retl_o2_plus_31_24)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_31_24)
ENTRY(memcpy_retl_o2_plus_31_16)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_31_16)
ENTRY(memcpy_retl_o2_plus_31_8)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_31_8)
ENTRY(memcpy_retl_o2_plus_63)
ba,pt %xcc, __restore_asi
add %o2, 63, %o0
ENDPROC(memcpy_retl_o2_plus_63)
ENTRY(memcpy_retl_o2_plus_63_64)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 64, %o0
ENDPROC(memcpy_retl_o2_plus_63_64)
ENTRY(memcpy_retl_o2_plus_63_56)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 56, %o0
ENDPROC(memcpy_retl_o2_plus_63_56)
ENTRY(memcpy_retl_o2_plus_63_48)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 48, %o0
ENDPROC(memcpy_retl_o2_plus_63_48)
ENTRY(memcpy_retl_o2_plus_63_40)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 40, %o0
ENDPROC(memcpy_retl_o2_plus_63_40)
ENTRY(memcpy_retl_o2_plus_63_32)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_63_32)
ENTRY(memcpy_retl_o2_plus_63_24)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_63_24)
ENTRY(memcpy_retl_o2_plus_63_16)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_63_16)
ENTRY(memcpy_retl_o2_plus_63_8)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_63_8)
ENTRY(memcpy_retl_o2_plus_o5)
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5)
ENTRY(memcpy_retl_o2_plus_o5_plus_1)
add %o5, 1, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_1)
ENTRY(memcpy_retl_o2_plus_o5_plus_4)
add %o5, 4, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_4)
ENTRY(memcpy_retl_o2_plus_o5_plus_8)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_8)
ENTRY(memcpy_retl_o2_plus_o5_plus_16)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_16)
ENTRY(memcpy_retl_o2_plus_o5_plus_24)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_24)
ENTRY(memcpy_retl_o2_plus_o5_plus_32)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_32)
ENTRY(memcpy_retl_o2_plus_o5_64)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_64)
ENTRY(memcpy_retl_o2_plus_g1)
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1)
ENTRY(memcpy_retl_o2_plus_g1_plus_1)
add %g1, 1, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1_plus_1)
ENTRY(memcpy_retl_o2_plus_g1_plus_8)
add %g1, 8, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1_plus_8)
ENTRY(memcpy_retl_o2_plus_o4)
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4)
ENTRY(memcpy_retl_o2_plus_o4_plus_8)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_8)
ENTRY(memcpy_retl_o2_plus_o4_plus_16)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_16)
ENTRY(memcpy_retl_o2_plus_o4_plus_24)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_24)
ENTRY(memcpy_retl_o2_plus_o4_plus_32)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_32)
ENTRY(memcpy_retl_o2_plus_o4_plus_40)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_40)
ENTRY(memcpy_retl_o2_plus_o4_plus_48)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_48)
ENTRY(memcpy_retl_o2_plus_o4_plus_56)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_56)
ENTRY(memcpy_retl_o2_plus_o4_plus_64)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_64)
ENTRY(memcpy_retl_o2_plus_o5_plus_64)
add %o5, 64, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_64)
ENTRY(memcpy_retl_o2_plus_o3_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_fp)
ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp)
add %o3, 1, %o3
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp)
ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp)
add %o3, 4, %o3
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp)
ENTRY(memcpy_retl_o2_plus_o4_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp)
ENTRY(memcpy_retl_o2_plus_o5_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp)
add %o5, 64, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp)
add %o5, 56, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp)
add %o5, 48, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp)
add %o5, 40, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp)
#endif