Commit 0a7bf9c8 authored by Andrew Morton, committed by Jens Axboe

[PATCH] uninline the ia32 copy_*_user functions

There's more work to do on these, for well-aligned copies; Arjan has
some stuff for that. The first step on that path is to clean the code
up, get it uninlined, and have a framework for making per-CPU-type
decisions.
parent a792a27c
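
To make the "per-CPU-type decisions" framework concrete: the patch exports a
movsl_mask object (declared in uaccess.h below) that movsl_is_ok() consults
when choosing between plain rep;movs and the unrolled Intel copy loop. The
sketch below shows how setup code might tune it; the hook name is hypothetical
and the mask value of 7 (mutual 8-byte alignment) is only an assumption, not
part of this commit.

#include <asm/uaccess.h>	/* declares movsl_mask when INTEL_MOVSL is defined */

#ifdef INTEL_MOVSL
/*
 * Hypothetical boot-time hook (illustrative only, not in this patch):
 * pick the alignment mask that movsl_is_ok() XORs the two addresses
 * against.  With mask = 7, rep;movs is used only when source and
 * destination agree in their low three address bits, i.e. they share
 * 8-byte alignment.
 */
static void example_select_movsl_mask(void)
{
	movsl_mask.mask = 7;
}
#endif
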
@@ -116,14 +116,11 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__generic_copy_from_user);
EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(strnlen_user);
#ifdef INTEL_MOVSL
EXPORT_SYMBOL(movsl_mask);
EXPORT_SYMBOL(__copy_user_int);
EXPORT_SYMBOL(__copy_user_zeroing_int);
#endif
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
@@ -9,66 +9,20 @@
#include <asm/uaccess.h>
#include <asm/mmx.h>
#ifdef CONFIG_X86_USE_3DNOW_AND_WORKS
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
#ifdef INTEL_MOVSL
static inline int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
{
if(n<512)
__copy_user(to,from,n);
else
mmx_copy_user(to,from,n);
}
return n;
if (n < 64)
return 1;
if ((((const long)a1 ^ (const long)a2) & movsl_mask.mask) == 0)
return 1;
return 0;
}
unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
{
if(n<512)
__copy_user_zeroing(to,from,n);
else
mmx_copy_user_zeroing(to, from, n);
}
else
memset(to, 0, n);
return n;
}
#else
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
static inline int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n)) {
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_int(to, from, n);
}
return n;
return 1;
}
unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n)) {
if (movsl_is_ok(to, from, n))
__copy_user_zeroing(to,from,n);
else
n = __copy_user_zeroing_int(to, from, n);
} else {
memset(to, 0, n);
}
return n;
}
#endif
/*
@@ -198,12 +152,9 @@ long strnlen_user(const char *s, long n)
}
#ifdef INTEL_MOVSL
/*
* Copy To/From Userspace
*/
/* Generic arbitrary sized copy. */
unsigned long __copy_user_int(void *to, const void *from,unsigned long size)
static unsigned long
__copy_user_intel(void *to, const void *from,unsigned long size)
{
int d0, d1;
__asm__ __volatile__(
@@ -289,8 +240,8 @@ unsigned long __copy_user_int(void *to, const void *from,unsigned long size)
return size;
}
unsigned long
__copy_user_zeroing_int(void *to, const void *from, unsigned long size)
static unsigned long
__copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
{
int d0, d1;
__asm__ __volatile__(
@@ -383,4 +334,129 @@ __copy_user_zeroing_int(void *to, const void *from, unsigned long size)
: "eax", "edx", "memory");
return size;
}
#else /* INTEL_MOVSL */
/*
* Leave these declared but undefined; there should not be any references
* to them.
*/
unsigned long
__copy_user_zeroing_intel(void *to, const void *from, unsigned long size);
unsigned long
__copy_user_intel(void *to, const void *from,unsigned long size);
#endif /* INTEL_MOVSL */
/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \
do { \
int __d0, __d1, __d2; \
__asm__ __volatile__( \
" cmp $7,%0\n" \
" jbe 1f\n" \
" movl %1,%0\n" \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
"4: rep; movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 2b\n" \
"3: lea 0(%3,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 4b,5b\n" \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \
} while (0)
#define __copy_user_zeroing(to,from,size) \
do { \
int __d0, __d1, __d2; \
__asm__ __volatile__( \
" cmp $7,%0\n" \
" jbe 1f\n" \
" movl %1,%0\n" \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
"4: rep; movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 6f\n" \
"3: lea 0(%3,%0,4),%0\n" \
"6: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 4b,5b\n" \
" .long 0b,3b\n" \
" .long 1b,6b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \
} while (0)
unsigned long __copy_to_user(void *to, const void *from, unsigned long n)
{
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_intel(to, from, n);
return n;
}
unsigned long __copy_from_user(void *to, const void *from, unsigned long n)
{
if (movsl_is_ok(to, from, n))
__copy_user_zeroing(to, from, n);
else
n = __copy_user_zeroing_intel(to, from, n);
return n;
}
unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
}
unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
return n;
}
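
For reference, the calling convention of the now out-of-line routines is
unchanged from the inlined versions they replace: they return the number of
bytes that could not be copied, so zero means success. The struct and helper
below are invented purely to illustrate that convention.

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Illustrative only: fetch a small argument block from userspace.
 * copy_from_user() returns the number of bytes left uncopied, so any
 * non-zero result is mapped to -EFAULT. */
struct example_args {
	int fd;
	long len;
};

static int example_fetch_args(struct example_args *kargs, const void *uargs)
{
	if (copy_from_user(kargs, uargs, sizeof(*kargs)))
		return -EFAULT;
	return 0;
}
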
@@ -45,26 +45,8 @@
extern struct movsl_mask {
int mask;
} ____cacheline_aligned_in_smp movsl_mask;
static inline int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
{
if (n < 64)
return 1;
if ((((const long)a1 ^ (const long)a2) & movsl_mask.mask) == 0)
return 1;
return 0;
}
#else
static inline int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
{
return 1;
}
#endif
/* These are undefined on !INTEL_MOVSL. And they should be unreferenced. */
unsigned long __copy_user_int(void *, const void *, unsigned long);
unsigned long __copy_user_zeroing_int(void *, const void *, unsigned long);
int __verify_write(const void *, unsigned long);
#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
@@ -280,382 +262,10 @@ do { \
: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
/*
* Copy To/From Userspace
*/
/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \
do { \
int __d0, __d1, __d2; \
__asm__ __volatile__( \
" cmp $7,%0\n" \
" jbe 1f\n" \
" movl %1,%0\n" \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
"4: rep; movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 2b\n" \
"3: lea 0(%3,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 4b,5b\n" \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \
} while (0)
#define __copy_user_zeroing(to,from,size) \
do { \
int __d0, __d1, __d2; \
__asm__ __volatile__( \
" cmp $7,%0\n" \
" jbe 1f\n" \
" movl %1,%0\n" \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
"4: rep; movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 6f\n" \
"3: lea 0(%3,%0,4),%0\n" \
"6: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 4b,5b\n" \
" .long 0b,3b\n" \
" .long 1b,6b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \
} while (0)
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
if (movsl_is_ok(to, from, n))
__copy_user_zeroing(to, from, n);
else
n = __copy_user_zeroing_int(to, from, n);
return n;
}
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_int(to, from, n);
return n;
}
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
int __d0, __d1; \
switch (size & 3) { \
default: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1:\n" \
".section .fixup,\"ax\"\n" \
"2: shl $2,%0\n" \
" jmp 1b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,2b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 1: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: shl $2,%0\n" \
"4: incl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 2: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: shl $2,%0\n" \
"4: addl $2,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 3: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2: movsb\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: shl $2,%0\n" \
"5: addl $2,%0\n" \
"6: incl %0\n" \
" jmp 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,4b\n" \
" .long 1b,5b\n" \
" .long 2b,6b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
} \
} while (0)
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size) \
do { \
int __d0, __d1; \
switch (size & 3) { \
default: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1:\n" \
".section .fixup,\"ax\"\n" \
"2: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" jmp 1b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,2b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 1: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" incl %0\n" \
" jmp 2b\n" \
"4: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosb\n" \
" popl %%eax\n" \
" incl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 2: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" stosw\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" addl $2,%0\n" \
" jmp 2b\n" \
"4: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosw\n" \
" popl %%eax\n" \
" addl $2,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 3: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2: movsb\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" stosw\n" \
" stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" addl $3,%0\n" \
" jmp 2b\n" \
"5: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosw\n" \
" stosb\n" \
" popl %%eax\n" \
" addl $3,%0\n" \
" jmp 2b\n" \
"6: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosb\n" \
" popl %%eax\n" \
" incl %0\n" \
" jmp 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,4b\n" \
" .long 1b,5b\n" \
" .long 2b,6b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
} \
} while (0)
unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
__constant_copy_user(to,from,n);
return n;
}
static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__constant_copy_user_zeroing(to,from,n);
else
memset(to, 0, n);
return n;
}
static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
__constant_copy_user(to,from,n);
return n;
}
static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
__constant_copy_user_zeroing(to,from,n);
return n;
}
#define copy_to_user(to,from,n) \
__generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) \
__generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) \
__generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) \
__generic_copy_from_user_nocheck((to),(from),(n))
unsigned long copy_to_user(void *to, const void *from, unsigned long n);
unsigned long copy_from_user(void *to, const void *from, unsigned long n);
unsigned long __copy_to_user(void *to, const void *from, unsigned long n);
unsigned long __copy_from_user(void *to, const void *from, unsigned long n);
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
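
Finally, a standalone userspace toy (no kernel headers) that re-implements the
movsl_is_ok() heuristic to show its behaviour, assuming movsl_mask.mask were
set to 7: short copies always qualify for rep;movs, and longer ones only when
source and destination share alignment within an 8-byte block.

#include <stdio.h>

/* Toy re-implementation of the movsl_is_ok() test, for illustration only. */
static int toy_movsl_is_ok(const void *a1, const void *a2,
			   unsigned long n, long mask)
{
	if (n < 64)
		return 1;		/* short copies: always use rep;movs */
	if ((((long)a1 ^ (long)a2) & mask) == 0)
		return 1;		/* same alignment within the masked block */
	return 0;			/* misaligned: take the unrolled path */
}

int main(void)
{
	char buf[256];

	/* offsets 8 and 16 agree mod 8 -> prints 1 */
	printf("%d\n", toy_movsl_is_ok(buf + 8, buf + 16, 128, 7));
	/* offsets 1 and 5 differ mod 8 -> prints 0 */
	printf("%d\n", toy_movsl_is_ok(buf + 1, buf + 5, 128, 7));
	return 0;
}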