Commit a2494b9b authored by Paul Mundt

sh: Kill off dcache writeback from copy_page().

Now that the cache purging is handled manually by all copy_page()
callers, we can kill off copy_page()'s own writeback. This optimizes
the non-aliasing case.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 6e4154d4
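Since this moves a responsibility across the interface, it may help to see the new division of labour on the caller side. The sketch below is illustrative only: copy_user_page_sketch() is a hypothetical caller, and __flush_purge_region() stands in for whatever cache-purge helper the real callers use; neither is part of this commit.

	/*
	 * Sketch of a copy_page() caller after this change: the raw copy
	 * no longer does any cache maintenance, so the caller purges the
	 * destination itself when aliasing makes that necessary.
	 */
	static void copy_user_page_sketch(void *to, void *from)
	{
		copy_page(to, from);			/* plain copy, no writeback */
		__flush_purge_region(to, PAGE_SIZE);	/* caller's job now */
	}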
@@ -30,7 +30,9 @@ ENTRY(copy_page)
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.l	.Lpsz,r0
+	mov	#(PAGE_SIZE >> 10), r0
+	shll8	r0
+	shll2	r0
 	add	r0,r8
 	!
 1:	mov.l	@r11+,r0
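This hunk also drops the literal-pool load of PAGE_SIZE. The SH mov #imm,Rn instruction encodes only a sign-extended 8-bit immediate, so PAGE_SIZE itself does not fit, but PAGE_SIZE >> 10 does, and shll8 followed by shll2 shifts it back left by a total of 10 bits. A quick C check of that arithmetic, assuming a 4 KiB PAGE_SIZE for the example:

	#include <assert.h>

	int main(void)
	{
		unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
		unsigned long r0 = page_size >> 10;	/* 4, fits an 8-bit immediate */

		r0 <<= 8;	/* shll8 r0 */
		r0 <<= 2;	/* shll2 r0 */
		assert(r0 == page_size);
		return 0;
	}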
@@ -43,7 +45,6 @@ ENTRY(copy_page)
 	mov.l	@r11+,r7
 #if defined(CONFIG_CPU_SH4)
 	movca.l	r0,@r10
-	mov	r10,r0
 #else
 	mov.l	r0,@r10
 #endif
...@@ -55,9 +56,6 @@ ENTRY(copy_page) ...@@ -55,9 +56,6 @@ ENTRY(copy_page)
mov.l r3,@-r10 mov.l r3,@-r10
mov.l r2,@-r10 mov.l r2,@-r10
mov.l r1,@-r10 mov.l r1,@-r10
#if defined(CONFIG_CPU_SH4)
ocbwb @r0
#endif
cmp/eq r11,r8 cmp/eq r11,r8
bf/s 1b bf/s 1b
add #28,r10 add #28,r10
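Note how this hunk pairs with the previous one: the mov r10,r0 removed above existed only to preserve the destination line address for this ocbwb (the operand-cache write-back instruction), so once the per-line writeback goes, that register copy is dead code too.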
@@ -68,9 +66,6 @@ ENTRY(copy_page)
 	rts
 	 nop

-	.balign 4
-.Lpsz:	.long	PAGE_SIZE
-
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied