Commit ff47ab4f authored by Andi Kleen, committed by H. Peter Anvin

x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic

The 64bit __copy_{from,to}_user_inatomic always called
copy_user_generic, but skipped the special optimizations for 1/2/4/8
byte accesses.

This especially hurts the futex code, which accesses the 4-byte futex
user value with a complicated fast-string operation in a function call
instead of a single movl.
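To see why the constant size matters, here is a minimal userspace sketch of
the dispatch pattern (copy_generic() and copy_small() are stand-in names for
illustration, not the kernel API):

#include <string.h>

/* Stand-in for copy_user_generic(): the generic string-copy path. */
static inline int copy_generic(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

/* Constant-size fast path: for a compile-time 1/2/4/8 the compiler
 * drops the switch and emits a single move of the right width. */
static inline __attribute__((always_inline))
int copy_small(void *dst, const void *src, unsigned size)
{
	if (!__builtin_constant_p(size))
		return copy_generic(dst, src, size);
	switch (size) {
	case 1: *(unsigned char *)dst  = *(const unsigned char *)src;  return 0;
	case 2: *(unsigned short *)dst = *(const unsigned short *)src; return 0;
	case 4: *(unsigned int *)dst   = *(const unsigned int *)src;   return 0;
	case 8: *(unsigned long *)dst  = *(const unsigned long *)src;  return 0;
	default: return copy_generic(dst, src, size);
	}
}

For copy_small(&val, src, 4) the compiler proves the size is constant and
emits a single 32-bit move; the non-constant path pays for a function call
plus the string-operation setup.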

Use __copy_{from,to}_user for _inatomic instead to get the same
optimizations. The only problem was the might_fault() in those functions;
so move it into new wrapper functions and call __copy_{from,to}_user_nocheck()
from the *_inatomic variants directly (a condensed sketch of the resulting
split follows the commit metadata below).

32bit already did this correctly by duplicating the code.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 6e466452
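Condensed, the change splits each checked copy helper into an inner _nocheck
worker and a thin checked wrapper. A minimal userspace sketch of that shape
(assert_may_sleep() stands in for the kernel's might_fault(); all names here
are illustrative, the real ones appear in the hunks below):

#include <assert.h>
#include <stdbool.h>

static bool in_atomic_context;	/* stand-in for the kernel's context state */

/* Stand-in for might_fault(): a checked copy may sleep on a fault,
 * so it must not run in atomic context. */
static inline void assert_may_sleep(void)
{
	assert(!in_atomic_context);
}

/* Inner worker: does the copy (where the constant-size fast path would
 * live), with no sleeping check. */
static inline int copy_from_user_nocheck(void *dst, const void *src, unsigned size)
{
	__builtin_memcpy(dst, src, size);
	return 0;
}

/* The checked API keeps the might_fault()-style assertion... */
static inline int copy_from_user_checked(void *dst, const void *src, unsigned size)
{
	assert_may_sleep();
	return copy_from_user_nocheck(dst, src, size);
}

/* ...while the _inatomic variant calls the worker directly and still
 * gets the same inlined fast path. */
static inline int copy_from_user_inatomic(void *dst, const void *src, unsigned size)
{
	return copy_from_user_nocheck(dst, src, size);
}

might_fault() is a debugging aid for contexts that are allowed to sleep;
atomic callers must skip it, but with this split they no longer give up the
constant-size fast path to do so.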
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -164,6 +169,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 	}
 }
 
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
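For context, the futex read the message refers to is shaped roughly like the
following (a from-memory sketch in the style of kernel/futex.c of that era,
not part of this commit's diff): since sizeof(u32) is a compile-time constant
4, the _inatomic copy now reduces to a single movl.

/* Sketch of the futex fast-path read; the body is an approximation,
 * not quoted from this commit. */
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();	/* atomic: a fault must fail, not sleep */
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : ret;
}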