Commit 3f50dbc1 authored by Paolo Ciarrocchi, committed by Ingo Molnar

x86: coding style fixes to arch/x86/lib/usercopy_32.c

Before:
 total: 63 errors, 2 warnings, 878 lines checked
After:
 total: 0 errors, 2 warnings, 878 lines checked

Compile tested, no change in the binary output:

text    data     bss     dec     hex filename
3231       0       0    3231     c9f usercopy_32.o.after
3231       0       0    3231     c9f usercopy_32.o.before

md5sum:
9f9a3eb43970359ae7cecfd1c9e7cf42  usercopy_32.o.after
9f9a3eb43970359ae7cecfd1c9e7cf42  usercopy_32.o.before
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bdd3cee2
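
The figures above can be reproduced with the usual checkpatch-plus-rebuild check for a whitespace-only cleanup: run checkpatch.pl over the file, rebuild the single object before and after the edit, and compare size and md5sum. The sketch below is illustrative only (it assumes a configured kernel tree and uses hypothetical copy names for the before/after objects); it is not part of the original commit:

 # style check on the source file
 ./scripts/checkpatch.pl --file arch/x86/lib/usercopy_32.c
 # build just this object and keep a pre-cleanup copy
 make arch/x86/lib/usercopy_32.o
 cp arch/x86/lib/usercopy_32.o usercopy_32.o.before
 # ...apply the whitespace-only edits, then rebuild...
 make arch/x86/lib/usercopy_32.o
 cp arch/x86/lib/usercopy_32.o usercopy_32.o.after
 # identical size and md5sum output means the binary is unchanged
 size usercopy_32.o.before usercopy_32.o.after
 md5sum usercopy_32.o.before usercopy_32.o.after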
arch/x86/lib/usercopy_32.c

/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
@@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#endif
	return 1;
}
-#define movsl_is_ok(a1,a2,n) \
-	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
+#define movsl_is_ok(a1, a2, n) \
+	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))

/*
 * Copy a null terminated string from userspace.
 */

-#define __do_strncpy_from_user(dst,src,count,res) \
+#define __do_strncpy_from_user(dst, src, count, res) \
do { \
	int __d0, __d1, __d2; \
	might_sleep(); \
@@ -61,7 +61,7 @@ do { \
 * least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
@@ -90,7 +90,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
 * least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(strncpy_from_user);
do { \
	int __d0; \
	might_sleep(); \
	__asm__ __volatile__( \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
@@ -333,17 +333,17 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movl %%eax, 0(%3)\n"
		" movl %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movl %%eax, 8(%3)\n"
		" movl %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
@@ -369,38 +369,38 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
		"91: movl 60(%4), %%edx\n"
		" movl %%eax, 56(%3)\n"
		" movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,16b\n"
		" .long 1b,16b\n"
		" .long 2b,16b\n"
		" .long 21b,16b\n"
		" .long 3b,16b\n"
		" .long 31b,16b\n"
		" .long 4b,16b\n"
		" .long 41b,16b\n"
		" .long 10b,16b\n"
		" .long 51b,16b\n"
@@ -412,9 +412,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
		" .long 81b,16b\n"
		" .long 14b,16b\n"
		" .long 91b,16b\n"
		" .long 6b,9b\n"
		" .long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
@@ -429,7 +429,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		" .align 2,0x90\n"
@@ -526,7 +526,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		" .align 2,0x90\n"
@@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to,
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
-#define __copy_user(to,from,size) \
+#define __copy_user(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
@@ -665,7 +665,7 @@ do { \
		: "memory"); \
} while (0)

-#define __copy_user_zeroing(to,from,size) \
+#define __copy_user_zeroing(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
@@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
-			((unsigned long )to) < TASK_SIZE) {
+			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
@@ -721,26 +721,26 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually. Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
-					(unsigned long )to, 1, 1, 0, &pg, NULL);
+					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
@@ -750,8 +750,8 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
@@ -802,12 +802,12 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
+	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
@@ -817,12 +817,12 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
+	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}