Commit 122b05dd authored by Al Viro

amd64: get rid of zeroing

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent d597580d
...@@ -379,6 +379,18 @@ do { \ ...@@ -379,6 +379,18 @@ do { \
: "=r" (err), ltype(x) \ : "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err)) : "m" (__m(addr)), "i" (errret), "0" (err))
/*
 * __get_user_asm_nozero - load a single value from user space WITHOUT
 * clearing the destination on fault.
 *
 * Emits one mov from user memory (%2, i.e. @addr) into @x. If the load
 * faults, the _ASM_EXTABLE entry sends execution to the fixup at label 3,
 * which stores @errret into @err and jumps back past the load — leaving
 * @x untouched. That is the "_nozero" difference from __get_user_asm:
 * per the commit ("amd64: get rid of zeroing") callers now zero the
 * destination tail themselves, so the fixup path no longer needs to.
 *
 * @itype  - instruction size suffix for mov ("b"/"w"/"l"/"q")
 * @rtype  - register-width modifier for operand 1 ("b"/"w"/"k"/"")
 * @ltype  - output constraint for @x, e.g. "=r" or "=q"
 * @err    - int error variable; set to @errret only on fault
 *
 * NOTE(review): no __uaccess_begin/end here — callers (e.g.
 * __copy_from_user_nocheck) bracket each use with them.
 */
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
/* /*
* This doesn't do __uaccess_begin/end - the exception handling * This doesn't do __uaccess_begin/end - the exception handling
* around it must do that. * around it must do that.
......
...@@ -59,44 +59,44 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) ...@@ -59,44 +59,44 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
switch (size) { switch (size) {
case 1: case 1:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u8 *)dst, (u8 __user *)src, __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1); ret, "b", "b", "=q", 1);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 2: case 2:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u16 *)dst, (u16 __user *)src, __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2); ret, "w", "w", "=r", 2);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 4: case 4:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u32 *)dst, (u32 __user *)src, __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4); ret, "l", "k", "=r", 4);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 8: case 8:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src, __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8); ret, "q", "", "=r", 8);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 10: case 10:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src, __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10); ret, "q", "", "=r", 10);
if (likely(!ret)) if (likely(!ret))
__get_user_asm(*(u16 *)(8 + (char *)dst), __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
(u16 __user *)(8 + (char __user *)src), (u16 __user *)(8 + (char __user *)src),
ret, "w", "w", "=r", 2); ret, "w", "w", "=r", 2);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 16: case 16:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src, __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16); ret, "q", "", "=r", 16);
if (likely(!ret)) if (likely(!ret))
__get_user_asm(*(u64 *)(8 + (char *)dst), __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
(u64 __user *)(8 + (char __user *)src), (u64 __user *)(8 + (char __user *)src),
ret, "q", "", "=r", 8); ret, "q", "", "=r", 8);
__uaccess_end(); __uaccess_end();
......
...@@ -76,10 +76,11 @@ EXPORT_SYMBOL(_copy_to_user); ...@@ -76,10 +76,11 @@ EXPORT_SYMBOL(_copy_to_user);
*/ */
unsigned long _copy_from_user(void *to, const void __user *from, unsigned n) unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{ {
unsigned long res = n;
if (access_ok(VERIFY_READ, from, n)) if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n); res = __copy_from_user_inatomic(to, from, n);
else if (unlikely(res))
memset(to, 0, n); memset(to + n - res, 0, res);
return n; return res;
} }
EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(_copy_from_user);
...@@ -80,9 +80,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len) ...@@ -80,9 +80,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
break; break;
} }
clac(); clac();
/* If the destination is a kernel buffer, we always clear the end */
if (!__addr_ok(to))
memset(to, 0, len);
return len; return len;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment