Commit 0bcdda0f authored by Atsushi Nemoto, committed by Ralf Baechle

[MIPS] Unify csum_partial.S

The 32-bit and 64-bit versions are almost identical, so unify them.  This
makes further improvements (for example, parallel copying, PREFETCH support,
etc.) easier.
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 03dbd2e0
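For context, csum_partial produces the 32-bit running ones'-complement sum used by the IP checksum, which the assembly below computes with hand-unrolled, alignment-aware word loads.  A minimal C sketch of the underlying arithmetic (illustrative only: hypothetical function name, 16-bit big-endian words instead of the 32-bit words the assembly actually loads, and none of the alignment handling):

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative sketch, not kernel code: accumulate a buffer into a
	 * 32-bit ones'-complement partial sum, folding each carry back in
	 * the same way the ADDC macro below does. */
	static uint32_t csum_partial_sketch(const uint8_t *buf, size_t len,
					    uint32_t sum)
	{
		while (len > 1) {
			uint32_t w = ((uint32_t)buf[0] << 8) | buf[1];
			sum += w;
			if (sum < w)		/* carry out of bit 31 */
				sum++;
			buf += 2;
			len -= 2;
		}
		if (len) {			/* trailing odd byte */
			uint32_t w = (uint32_t)buf[0] << 8;
			sum += w;
			if (sum < w)
				sum++;
		}
		return sum;
	}

The unification itself rests on two things visible in the second half of the diff: the T0..T7 aliases, which pick scratch registers that are safe to clobber under both the 32-bit and 64-bit ABIs, and the width-agnostic PTR_* / LONG_* macros from <asm/asm.h>, which replace the explicit daddiu/daddu/dsubu/dsrl forms of the old 64-bit file.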
@@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o memset.o watch.o
+lib-y += memset.o watch.o
obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ralf Baechle
*/
#include <asm/asm.h>
#include <asm/regdef.h>
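/*
 * ADDC adds 'reg' into the running sum and folds the carry straight
 * back in: sltu captures the unsigned overflow into v1 and the second
 * addu adds it, giving 32-bit end-around-carry accumulation.
 */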
#define ADDC(sum,reg) \
addu sum, reg; \
sltu v1, sum, reg; \
addu sum, v1
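/*
 * CSUM_BIGCHUNK folds one 32-byte block into the sum: eight word
 * loads, each accumulated with ADDC.  The loads are issued in groups
 * of four ahead of their adds, which helps cover load latency.
 */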
#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
lw t0, (offset + 0x00)(src); \
lw t1, (offset + 0x04)(src); \
lw t2, (offset + 0x08)(src); \
lw t3, (offset + 0x0c)(src); \
ADDC(sum, t0); \
ADDC(sum, t1); \
ADDC(sum, t2); \
ADDC(sum, t3); \
lw t0, (offset + 0x10)(src); \
lw t1, (offset + 0x14)(src); \
lw t2, (offset + 0x18)(src); \
lw t3, (offset + 0x1c)(src); \
ADDC(sum, t0); \
ADDC(sum, t1); \
ADDC(sum, t2); \
ADDC(sum, t3); \
/*
* a0: source address
* a1: length of the area to checksum
* a2: partial checksum
*/
#define src a0
#define dest a1
#define sum v0
.text
.set noreorder
/* unknown src alignment and < 8 bytes to go */
small_csumcpy:
move a1, t2
andi t0, a1, 4
beqz t0, 1f
andi t0, a1, 2
/* Still a full word to go */
ulw t1, (src)
addiu src, 4
ADDC(sum, t1)
1: move t1, zero
beqz t0, 1f
andi t0, a1, 1
/* Still a halfword to go */
ulhu t1, (src)
addiu src, 2
1: beqz t0, 1f
sll t1, t1, 16
lbu t2, (src)
nop
#ifdef __MIPSEB__
sll t2, t2, 8
#endif
or t1, t2
1: ADDC(sum, t1)
/* fold checksum */
sll v1, sum, 16
addu sum, v1
sltu v1, sum, v1
srl sum, sum, 16
addu sum, v1
/* odd buffer alignment? */
beqz t7, 1f
nop
sll v1, sum, 8
srl sum, sum, 8
or sum, v1
andi sum, 0xffff
1:
.set reorder
/* Add the passed partial csum. */
ADDC(sum, a2)
jr ra
.set noreorder
/* ------------------------------------------------------------------------- */
.align 5
LEAF(csum_partial)
move sum, zero
move t7, zero
sltiu t8, a1, 0x8
bnez t8, small_csumcpy /* < 8 bytes to copy */
move t2, a1
beqz a1, out
andi t7, src, 0x1 /* odd buffer? */
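/*
 * Align src in steps (odd byte, halfword, word, 8-byte and 16-byte
 * boundaries), folding the leading data into sum as we go, so that
 * the unrolled loops below work on aligned 32-byte blocks.
 */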
hword_align:
beqz t7, word_align
andi t8, src, 0x2
lbu t0, (src)
subu a1, a1, 0x1
#ifdef __MIPSEL__
sll t0, t0, 8
#endif
ADDC(sum, t0)
addu src, src, 0x1
andi t8, src, 0x2
word_align:
beqz t8, dword_align
sltiu t8, a1, 56
lhu t0, (src)
subu a1, a1, 0x2
ADDC(sum, t0)
sltiu t8, a1, 56
addu src, src, 0x2
dword_align:
bnez t8, do_end_words
move t8, a1
andi t8, src, 0x4
beqz t8, qword_align
andi t8, src, 0x8
lw t0, 0x00(src)
subu a1, a1, 0x4
ADDC(sum, t0)
addu src, src, 0x4
andi t8, src, 0x8
qword_align:
beqz t8, oword_align
andi t8, src, 0x10
lw t0, 0x00(src)
lw t1, 0x04(src)
subu a1, a1, 0x8
ADDC(sum, t0)
ADDC(sum, t1)
addu src, src, 0x8
andi t8, src, 0x10
oword_align:
beqz t8, begin_movement
srl t8, a1, 0x7
lw t3, 0x08(src)
lw t4, 0x0c(src)
lw t0, 0x00(src)
lw t1, 0x04(src)
ADDC(sum, t3)
ADDC(sum, t4)
ADDC(sum, t0)
ADDC(sum, t1)
subu a1, a1, 0x10
addu src, src, 0x10
srl t8, a1, 0x7
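/*
 * t8 now holds the number of whole 128-byte blocks.  After the main
 * loop, the low bits of a1 select an optional 64-byte block, then a
 * 32-byte block, then any remaining words and bytes.
 */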
begin_movement:
beqz t8, 1f
andi t2, a1, 0x40
move_128bytes:
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
subu t8, t8, 0x01
bnez t8, move_128bytes
addu src, src, 0x80
1:
beqz t2, 1f
andi t2, a1, 0x20
move_64bytes:
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
addu src, src, 0x40
1:
beqz t2, do_end_words
andi t8, a1, 0x1c
move_32bytes:
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
andi t8, a1, 0x1c
addu src, src, 0x20
do_end_words:
beqz t8, maybe_end_cruft
srl t8, t8, 0x2
end_words:
lw t0, (src)
subu t8, t8, 0x1
ADDC(sum, t0)
bnez t8, end_words
addu src, src, 0x4
maybe_end_cruft:
andi t2, a1, 0x3
small_memcpy:
j small_csumcpy; move a1, t2
beqz t2, out
move a1, t2
end_bytes:
lb t0, (src)
subu a1, a1, 0x1
bnez a2, end_bytes
addu src, src, 0x1
out:
jr ra
move v0, sum
END(csum_partial)
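The tail of small_csumcpy above, which folds the 32-bit sum to 16 bits and byte-swaps it when the buffer started on an odd address, is easier to read in C.  A rough equivalent (hypothetical helper name; the real code then adds the caller's partial checksum from a2 with ADDC and returns the result as a 32-bit value):

	#include <stdint.h>

	/* Illustrative sketch, not kernel code. */
	static uint32_t fold_and_fixup(uint32_t sum, int src_was_odd)
	{
		sum = (sum >> 16) + (sum & 0xffff);	/* high half + low half */
		sum += sum >> 16;			/* fold the end-around carry */
		sum &= 0xffff;				/* now at most 16 bits */

		if (src_was_odd)	/* bytes were accumulated in swapped lanes */
			sum = ((sum & 0xff) << 8) | (sum >> 8);

		return sum;
	}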
@@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o memset.o watch.o
+lib-y += memset.o watch.o
obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
......
@@ -2,8 +2,8 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
-	 strnlen_user.o uncached.o
+lib-y += csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+	 strlen_user.o strncpy_user.o strnlen_user.o uncached.o
obj-y += iomap.o
......
@@ -11,28 +11,44 @@
#include <asm/asm.h>
#include <asm/regdef.h>
#ifdef CONFIG_64BIT
#define T0 ta0
#define T1 ta1
#define T2 ta2
#define T3 ta3
#define T4 t0
#define T7 t3
#else
#define T0 t0
#define T1 t1
#define T2 t2
#define T3 t3
#define T4 t4
#define T7 t7
#endif
#define ADDC(sum,reg) \
addu sum, reg; \
sltu v1, sum, reg; \
addu sum, v1
#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
lw t0, (offset + 0x00)(src); \
lw t1, (offset + 0x04)(src); \
lw t2, (offset + 0x08)(src); \
lw t3, (offset + 0x0c)(src); \
ADDC(sum, t0); \
ADDC(sum, t1); \
ADDC(sum, t2); \
ADDC(sum, t3); \
lw t0, (offset + 0x10)(src); \
lw t1, (offset + 0x14)(src); \
lw t2, (offset + 0x18)(src); \
lw t3, (offset + 0x1c)(src); \
ADDC(sum, t0); \
ADDC(sum, t1); \
ADDC(sum, t2); \
ADDC(sum, t3); \
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
lw _t0, (offset + 0x00)(src); \
lw _t1, (offset + 0x04)(src); \
lw _t2, (offset + 0x08)(src); \
lw _t3, (offset + 0x0c)(src); \
ADDC(sum, _t0); \
ADDC(sum, _t1); \
ADDC(sum, _t2); \
ADDC(sum, _t3); \
lw _t0, (offset + 0x10)(src); \
lw _t1, (offset + 0x14)(src); \
lw _t2, (offset + 0x18)(src); \
lw _t3, (offset + 0x1c)(src); \
ADDC(sum, _t0); \
ADDC(sum, _t1); \
ADDC(sum, _t2); \
ADDC(sum, _t3); \
/*
* a0: source address
@@ -48,37 +64,37 @@
/* unknown src alignment and < 8 bytes to go */
small_csumcpy:
move a1, ta2
move a1, T2
andi ta0, a1, 4
beqz ta0, 1f
andi ta0, a1, 2
andi T0, a1, 4
beqz T0, 1f
andi T0, a1, 2
/* Still a full word to go */
ulw ta1, (src)
daddiu src, 4
ADDC(sum, ta1)
ulw T1, (src)
PTR_ADDIU src, 4
ADDC(sum, T1)
1: move ta1, zero
beqz ta0, 1f
andi ta0, a1, 1
1: move T1, zero
beqz T0, 1f
andi T0, a1, 1
/* Still a halfword to go */
ulhu ta1, (src)
daddiu src, 2
ulhu T1, (src)
PTR_ADDIU src, 2
1: beqz ta0, 1f
sll ta1, ta1, 16
1: beqz T0, 1f
sll T1, T1, 16
lbu ta2, (src)
lbu T2, (src)
nop
#ifdef __MIPSEB__
sll ta2, ta2, 8
sll T2, T2, 8
#endif
or ta1, ta2
or T1, T2
1: ADDC(sum, ta1)
1: ADDC(sum, T1)
/* fold checksum */
sll v1, sum, 16
@@ -88,7 +104,7 @@ small_csumcpy:
addu sum, v1
/* odd buffer alignment? */
beqz t3, 1f
beqz T7, 1f
nop
sll v1, sum, 8
srl sum, sum, 8
@@ -106,37 +122,37 @@ small_csumcpy:
.align 5
LEAF(csum_partial)
move sum, zero
move t3, zero
move T7, zero
sltiu t8, a1, 0x8
bnez t8, small_csumcpy /* < 8 bytes to copy */
move ta2, a1
move T2, a1
beqz a1, out
andi t3, src, 0x1 /* odd buffer? */
andi T7, src, 0x1 /* odd buffer? */
hword_align:
beqz t3, word_align
beqz T7, word_align
andi t8, src, 0x2
lbu ta0, (src)
dsubu a1, a1, 0x1
lbu T0, (src)
LONG_SUBU a1, a1, 0x1
#ifdef __MIPSEL__
sll ta0, ta0, 8
sll T0, T0, 8
#endif
ADDC(sum, ta0)
daddu src, src, 0x1
ADDC(sum, T0)
PTR_ADDU src, src, 0x1
andi t8, src, 0x2
word_align:
beqz t8, dword_align
sltiu t8, a1, 56
lhu ta0, (src)
dsubu a1, a1, 0x2
ADDC(sum, ta0)
lhu T0, (src)
LONG_SUBU a1, a1, 0x2
ADDC(sum, T0)
sltiu t8, a1, 56
daddu src, src, 0x2
PTR_ADDU src, src, 0x2
dword_align:
bnez t8, do_end_words
@@ -146,95 +162,95 @@ dword_align:
beqz t8, qword_align
andi t8, src, 0x8
lw ta0, 0x00(src)
dsubu a1, a1, 0x4
ADDC(sum, ta0)
daddu src, src, 0x4
lw T0, 0x00(src)
LONG_SUBU a1, a1, 0x4
ADDC(sum, T0)
PTR_ADDU src, src, 0x4
andi t8, src, 0x8
qword_align:
beqz t8, oword_align
andi t8, src, 0x10
lw ta0, 0x00(src)
lw ta1, 0x04(src)
dsubu a1, a1, 0x8
ADDC(sum, ta0)
ADDC(sum, ta1)
daddu src, src, 0x8
lw T0, 0x00(src)
lw T1, 0x04(src)
LONG_SUBU a1, a1, 0x8
ADDC(sum, T0)
ADDC(sum, T1)
PTR_ADDU src, src, 0x8
andi t8, src, 0x10
oword_align:
beqz t8, begin_movement
dsrl t8, a1, 0x7
lw ta3, 0x08(src)
lw t0, 0x0c(src)
lw ta0, 0x00(src)
lw ta1, 0x04(src)
ADDC(sum, ta3)
ADDC(sum, t0)
ADDC(sum, ta0)
ADDC(sum, ta1)
dsubu a1, a1, 0x10
daddu src, src, 0x10
dsrl t8, a1, 0x7
LONG_SRL t8, a1, 0x7
lw T3, 0x08(src)
lw T4, 0x0c(src)
lw T0, 0x00(src)
lw T1, 0x04(src)
ADDC(sum, T3)
ADDC(sum, T4)
ADDC(sum, T0)
ADDC(sum, T1)
LONG_SUBU a1, a1, 0x10
PTR_ADDU src, src, 0x10
LONG_SRL t8, a1, 0x7
begin_movement:
beqz t8, 1f
andi ta2, a1, 0x40
andi T2, a1, 0x40
move_128bytes:
CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
dsubu t8, t8, 0x01
CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
CSUM_BIGCHUNK(src, 0x40, sum, T0, T1, T3, T4)
CSUM_BIGCHUNK(src, 0x60, sum, T0, T1, T3, T4)
LONG_SUBU t8, t8, 0x01
bnez t8, move_128bytes
daddu src, src, 0x80
PTR_ADDU src, src, 0x80
1:
beqz ta2, 1f
andi ta2, a1, 0x20
beqz T2, 1f
andi T2, a1, 0x20
move_64bytes:
CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
daddu src, src, 0x40
CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
PTR_ADDU src, src, 0x40
1:
beqz ta2, do_end_words
beqz T2, do_end_words
andi t8, a1, 0x1c
move_32bytes:
CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
andi t8, a1, 0x1c
daddu src, src, 0x20
PTR_ADDU src, src, 0x20
do_end_words:
beqz t8, maybe_end_cruft
dsrl t8, t8, 0x2
LONG_SRL t8, t8, 0x2
end_words:
lw ta0, (src)
dsubu t8, t8, 0x1
ADDC(sum, ta0)
lw T0, (src)
LONG_SUBU t8, t8, 0x1
ADDC(sum, T0)
bnez t8, end_words
daddu src, src, 0x4
PTR_ADDU src, src, 0x4
maybe_end_cruft:
andi ta2, a1, 0x3
andi T2, a1, 0x3
small_memcpy:
j small_csumcpy; move a1, ta2 /* XXX ??? */
j small_csumcpy; move a1, T2 /* XXX ??? */
beqz t2, out
move a1, ta2
move a1, T2
end_bytes:
lb ta0, (src)
dsubu a1, a1, 0x1
lb T0, (src)
LONG_SUBU a1, a1, 0x1
bnez a2, end_bytes
daddu src, src, 0x1
PTR_ADDU src, src, 0x1
out:
jr ra