Commit 1a4fded6 authored by Al Viro

mips: get rid of tail-zeroing in primitives

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent ab0aca27
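
Background for the diff below: the MIPS copy primitives used to zero the tail of the destination buffer when a copy from user space faulted partway through, and carried separate __copy_user_inatomic entry points (flagged through t7 on Octeon, t6 in the generic memcpy.S) whose only job was to skip that zeroing. This commit deletes both the zeroing and the inatomic entries; clearing the uncopied tail becomes the responsibility of generic code. As a rough sketch of where the work moves, assuming the usual convention that the primitive returns the number of uncopied bytes, a generic wrapper can do the zeroing itself. The wrapper below is illustrative, not code from this commit:

	#include <string.h>

	/*
	 * Illustrative sketch only: once the arch primitive stops zeroing,
	 * a generic wrapper can clear exactly the bytes that were not
	 * copied.  copy_primitive() stands in for the arch __copy_user and
	 * returns the number of uncopied bytes, per the kernel convention.
	 */
	static unsigned long
	copy_with_tail_zeroing(void *to, const void *from, unsigned long n,
			       unsigned long (*copy_primitive)(void *,
							       const void *,
							       unsigned long))
	{
		unsigned long res = copy_primitive(to, from, n);

		if (res)	/* short copy: a fault was hit */
			memset((char *)to + (n - res), 0, res);	/* zero only the tail */
		return res;
	}

Keeping the primitives dumb lets a single generic call site decide whether zeroing is needed at all (it is not, for example, on the inatomic paths).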
@@ -139,15 +139,6 @@
 	.set	noreorder
 	.set	noat
 
-/*
- * t7 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t7, 1
-	END(__copy_user_inatomic)
-
 /*
  * A combined memcpy/__copy_user
  * __copy_user sets len to 0 for success; else to an upper bound of
@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
 __memcpy:
 FEXPORT(__copy_user)
 EXPORT_SYMBOL(__copy_user)
-	li	t7, 0 /* not inatomic */
-__copy_user_common:
 	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
@@ -414,25 +403,7 @@ l_exc:
 	LOAD	t0, TI_TASK($28)
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t7, 2f		/* Skip the zeroing out part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *   See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0		# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	beqz	len, done
-	 SUB	src, len, 1
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-	bnez	src, 1b
-	 SUB	src, src, 1
-2:	jr	ra
+	jr	ra
 	 nop
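
For orientation, the fixup deleted here (and again in the memcpy.S hunk further down) ran in the exception path: THREAD_BUADDR holds the first faulting source address, AT the end of the source range, so len = AT - t0 is the number of uncopied bytes; because src and dst advance in tandem, dst + (t0 - src) is the first destination byte to clear, and a byte-store loop (rather than __bzero, which could clobber len) zeroed the tail. In C terms the removed logic amounts to the sketch below; all names are illustrative:

	#include <stddef.h>
	#include <string.h>

	/*
	 * C rendering of the deleted zeroing fixup: `fault` is the first
	 * bad source address, `src_end` the end of the requested range.
	 */
	static size_t tail_zero_fixup(unsigned char *dst, const unsigned char *src,
				      const unsigned char *fault,
				      const unsigned char *src_end)
	{
		size_t len = (size_t)(src_end - fault);	/* uncopied bytes */

		/* dst += (fault addr - src): first destination byte to clear */
		memset(dst + (fault - src), 0, len);
		return len;	/* what the asm left in `len` for the caller */
	}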
@@ -841,9 +841,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 #define __invoke_copy_from_kernel(to, from, n)				\
 	__invoke_copy_from(__copy_user, to, from, n)
 
-#define __invoke_copy_from_kernel_inatomic(to, from, n)		\
-	__invoke_copy_from(__copy_user_inatomic, to, from, n)
-
 #define __invoke_copy_to_kernel(to, from, n)				\
 	__invoke_copy_to(__copy_user, to, from, n)
@@ -854,9 +851,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 #define __invoke_copy_from_user(to, from, n)				\
 	__invoke_copy_from(__copy_user, to, from, n)
 
-#define __invoke_copy_from_user_inatomic(to, from, n)			\
-	__invoke_copy_from(__copy_user_inatomic, to, from, n)
-
 #define __invoke_copy_to_user(to, from, n)				\
 	__invoke_copy_to(__copy_user, to, from, n)
@@ -867,8 +861,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 /* EVA specific functions */
 
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
-				       size_t __n);
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
@@ -882,9 +874,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 #define __invoke_copy_from_user(to, from, n)				\
 	__invoke_copy_from(__copy_from_user_eva, to, from, n)
 
-#define __invoke_copy_from_user_inatomic(to, from, n)			\
-	__invoke_copy_from(__copy_user_inatomic_eva, to, from, n)
-
 #define __invoke_copy_to_user(to, from, n)				\
 	__invoke_copy_to(__copy_to_user_eva, to, from, n)
@@ -930,8 +919,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
 #define __copy_to_user_inatomic(to, from, n)				\
 ({									\
	void __user *__cu_to;						\
@@ -966,12 +953,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	check_object_size(__cu_to, __cu_len, false);			\
 									\
 	if (eva_kernel_access())					\
-		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
-							      __cu_from,\
+		__cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\
 							      __cu_len);\
 	else								\
-		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
-							    __cu_from,	\
+		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
 							    __cu_len);	\
 	__cu_len;							\
 })
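
With the dedicated inatomic primitives gone, __copy_from_user_inatomic() simply routes to the ordinary __copy_user / __copy_from_user_eva paths; what still distinguishes it is the calling context, not the copy routine. A hedged usage sketch follows — the surrounding function is illustrative, while pagefault_disable()/pagefault_enable() and the return convention (number of uncopied bytes) are the usual kernel ones:

	#include <linux/uaccess.h>

	/*
	 * Illustrative caller: in atomic context faults must not sleep,
	 * so the copy may come up short and the caller handles the rest.
	 */
	static size_t copy_chunk_atomic(void *dst, const void __user *src,
					size_t n)
	{
		size_t uncopied;

		pagefault_disable();	/* faults fail fast instead of sleeping */
		uncopied = __copy_from_user_inatomic(dst, src, n);
		pagefault_enable();

		return uncopied;	/* 0 on full copy; caller copes otherwise */
	}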
@@ -562,39 +562,9 @@
 	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t6, .Ldone\@	/* Skip the zeroing part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *   See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0		# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone\@
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	.set	push
-	.set	noat
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
-	.set	pop
-#endif
 	jr	ra
 	 nop
 
 #define SEXC(n)							\
 	.set	reorder;			/* DADDI_WAR */	\
 .Ls_exc_p ## n ## u\@:						\
@@ -672,15 +642,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
 	move	a2, zero
 	END(__rmemcpy)
 
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t6, 1
-	END(__copy_user_inatomic)
-
 /*
  * A combined memcpy/__copy_user
  * __copy_user sets len to 0 for success; else to an upper bound of
@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
 .L__memcpy:
 FEXPORT(__copy_user)
 EXPORT_SYMBOL(__copy_user)
-	li	t6, 0 /* not inatomic */
-__copy_user_common:
 	/* Legacy Mode, user <-> user */
 	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -708,20 +667,12 @@ __copy_user_common:
 	 * space
	 */
 
-LEAF(__copy_user_inatomic_eva)
-EXPORT_SYMBOL(__copy_user_inatomic_eva)
-	b	__copy_from_user_common
-	 li	t6, 1
-	END(__copy_user_inatomic_eva)
-
 /*
  * __copy_from_user (EVA)
  */
 
 LEAF(__copy_from_user_eva)
 EXPORT_SYMBOL(__copy_from_user_eva)
-	li	t6, 0 /* not inatomic */
-__copy_from_user_common:
 	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
 	END(__copy_from_user_eva)
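
__BUILD_COPY_USER stamps out one copy routine per (mode, source-op, destination-op) combination; with the t6 flag and the shared __copy_from_user_common entry gone, each LEAF reduces to a single macro expansion. A loose C analogue of that parameterization, purely illustrative and nowhere near the real word-at-a-time implementation:

	#include <stddef.h>

	/*
	 * Illustrative analogue: generate a copy routine over two fallible
	 * byte accessors (returning nonzero on fault); the result reports
	 * the number of uncopied bytes, like __copy_user.
	 */
	#define BUILD_COPY_USER(name, load_byte, store_byte)		\
	static size_t name(unsigned char *dst, const unsigned char *src,\
			   size_t n)					\
	{								\
		unsigned char c;					\
		while (n) {						\
			if (load_byte(&c, src) || store_byte(dst, c))	\
				break;	/* fault: stop, report tail */	\
			src++; dst++; n--;				\
		}							\
		return n;						\
	}

Instantiating it once with user-space loads and kernel stores, and once with the roles swapped, mirrors what the USEROP/KERNELOP operands select in the assembler macro.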