Commit 733037c3 authored by Andi Kleen, committed by Ben Hutchings

x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic

commit ff47ab4f upstream.

The 64bit __copy_{from,to}_user_inatomic always called
copy_user_generic, skipping the special optimizations for 1/2/4/8
byte accesses.

This especially hurts the futex code, which fetches the 4 byte futex
value from user space through a complicated fast-string operation in an
out-of-line function call instead of a single movl.
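(For context, the 4 byte access in question is the futex value read.
Paraphrasing kernel/futex.c of this era, and not part of this patch: the
read happens with page faults disabled, so it must go through the
_inatomic helper, which before this change always fell back to
copy_user_generic() even though the size is a constant 4 bytes.)

/* Paraphrase of get_futex_value_locked() from kernel/futex.c of this
 * era, shown only to illustrate the call path this patch speeds up. */
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}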

Use __copy_{from,to}_user for _inatomic instead to get the same
optimizations. The only problem was the might_fault() in those functions,
so move it into new __copy_{from,to}_user wrappers and have the *_inatomic
variants call __copy_{from,to}_user_nocheck() directly.

32bit already did this correctly by duplicating the code.
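(A simplified sketch of the optimization being enabled here. The names
copy_small_or_generic() and copy_generic() are illustrative stand-ins
rather than kernel functions, and the sketch omits the exception-table
fixups that the real __get_user_asm/__put_user_asm paths provide; the
point is that a compile-time-constant 1/2/4/8 byte size collapses into a
single mov instead of an out-of-line string copy.)

#include <stdint.h>
#include <string.h>

/* Stand-in for the out-of-line generic copy (copy_user_generic() in the
 * kernel); plain memcpy() here just so the sketch is self-contained. */
static int copy_generic(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

/* Simplified model of the constant-size dispatch in
 * __copy_{from,to}_user_nocheck(): a compile-time-constant 1/2/4/8 byte
 * size becomes a single load/store (movb/movw/movl/movq) rather than a
 * call into the generic copy routine. */
static inline int copy_small_or_generic(void *dst, const void *src, unsigned size)
{
	if (!__builtin_constant_p(size))
		return copy_generic(dst, src, size);

	switch (size) {
	case 1: *(uint8_t  *)dst = *(const uint8_t  *)src; return 0; /* movb */
	case 2: *(uint16_t *)dst = *(const uint16_t *)src; return 0; /* movw */
	case 4: *(uint32_t *)dst = *(const uint32_t *)src; return 0; /* movl */
	case 8: *(uint64_t *)dst = *(const uint64_t *)src; return 0; /* movq */
	default:
		return copy_generic(dst, src, size);
	}
}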
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Jaccon Bastiaansen <jaccon.bastiaansen@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: mingo@redhat.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: h.zuidam@computer.org
parent 3cfb9a41
arch/x86/include/asm/uaccess_64.h

@@ -68,11 +68,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -112,11 +111,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -155,6 +160,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 	}
 }
 
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
@@ -221,13 +233,13 @@ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
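(Reassembled from the hunks above rather than new code: roughly how the
__copy_from_user family relates after the patch. The __copy_to_user side
is symmetric, and the 1/2/4/8 byte switch body is elided.)

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	/* no might_fault(); constant 1/2/4/8 byte fast paths, otherwise
	 * falls back to copy_user_generic() -- body elided */
}

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
}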