Commit 23504bae authored by Al Viro

tile: get rid of zeroing, switch to RAW_COPY_USER

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent c0ea73f1
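
With ARCH_HAS_RAW_COPY_USER selected, the architecture only supplies raw_copy_to_user()/raw_copy_from_user(); the access_ok() check, the might_fault() annotation and the zero-padding of short reads all live in the generic uaccess code. As orientation, the generic copy_from_user() path behaves roughly like the sketch below (a simplified paraphrase, not the exact generic source; the name generic_copy_from_user is invented for illustration):

/*
 * Simplified sketch of the generic logic that replaces the arch-private
 * __copy_from_user_zeroing(): the arch hook only copies, the generic
 * layer validates the range and zero-pads whatever could not be copied.
 */
static inline unsigned long
generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* arch hook */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* zero the uncopied tail */
	return res;					/* bytes NOT copied; 0 on success */
}
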
@@ -33,6 +33,7 @@ config TILE
 	select USER_STACKTRACE_SUPPORT
 	select USE_PMC if PERF_EVENTS
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 
 config MMU
 	def_bool y
...
@@ -313,145 +313,16 @@ extern int __put_user_bad(void)
 		((x) = 0, -EFAULT); \
 })
 
-/**
- * __copy_to_user() - copy data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * An alternate version - __copy_to_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().
- */
-extern unsigned long __must_check __copy_to_user_inatomic(
-	void __user *to, const void *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-
-/**
- * __copy_from_user() - copy data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable(). This version
- * does *NOT* pad with zeros.
- */
-extern unsigned long __must_check __copy_from_user_inatomic(
-	void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_from_user_zeroing(
-	void *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_from_user_zeroing(to, from, n);
-}
-
-static inline unsigned long __must_check
-_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to,
-					  const void __user *from,
-					  unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	if (likely(sz == -1 || sz >= n))
-		n = _copy_from_user(to, from, n);
-	else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+extern unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 #ifdef __tilegx__
-/**
- * __copy_in_user() - copy data within user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to user space. Caller must check
- * the specified blocks with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-extern unsigned long __copy_in_user_inatomic(
+extern unsigned long raw_copy_in_user(
 	void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_in_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
 #endif
...
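
Nothing changes for callers of the uaccess API: copy_from_user() and copy_to_user() still return the number of bytes that could not be copied, and copy_from_user() still zero-fills the uncopied tail of the kernel buffer; defining INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER merely asks the generic header to inline those wrappers instead of using the out-of-line library versions. A hypothetical caller, purely for illustration (demo_args and demo_ioctl are not part of the patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {			/* invented example structure */
	u32 flags;
	u64 addr;
};

static long demo_ioctl(void __user *uarg)
{
	struct demo_args args;

	/* non-zero return means some bytes were left uncopied */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	args.flags |= 1;
	if (copy_to_user(uarg, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
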
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
 
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 #ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
 #endif
 
 /* hypervisor glue */
...
@@ -24,7 +24,6 @@
 
 #define IS_MEMCPY 0
 #define IS_COPY_FROM_USER 1
-#define IS_COPY_FROM_USER_ZEROING 2
 #define IS_COPY_TO_USER -1
 
 	.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
 9
 
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
  * the user source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_from_user_inatomic)
-	.type __copy_from_user_inatomic, @function
-	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+	.type raw_copy_from_user, @function
+	FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
 	  .text.memcpy_common, \
-	  .Lend_memcpy_common - __copy_from_user_inatomic)
+	  .Lend_memcpy_common - raw_copy_from_user)
 	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
-	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
+	.size raw_copy_from_user, . - raw_copy_from_user
 
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-	.type __copy_from_user_zeroing, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
-	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
-	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
-
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
  * the kernel source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_to_user_inatomic)
-	.type __copy_to_user_inatomic, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+	.type raw_copy_to_user, @function
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
-	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+	.size raw_copy_to_user, . - raw_copy_to_user
 
 ENTRY(memcpy)
 	.type memcpy, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_MEMCPY }
 	.size memcpy, . - memcpy
 	/* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
 	{ bnzt r2, copy_from_user_fixup_loop }
 
 .Lcopy_from_user_fixup_zero_remainder:
-	{ bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */
-	/* byte-at-a-time loop faulted, so zero the rest. */
-	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
-	{ bnzt r3, 1b }
-2:	move lr, r27
+	move lr, r27
 	{ move r0, r2; jrp lr }
 
 copy_to_user_fixup_loop:
...
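
The last hunk above is the 32-bit payoff of the conversion: when the byte-at-a-time copy faults, the fixup no longer inspects r29 to decide whether to zero the remainder itself; it just returns the uncopied count in r0 and leaves the zero-padding to the generic copy_from_user() wrapper. Roughly, in C (a descriptive rendering only; the real code is TILE assembly and the function name is invented):

#define IS_COPY_FROM_USER_ZEROING 2	/* value of the #define removed above */

/* Rough C equivalent of the deleted fixup branch: with RAW_COPY_USER only
 * the "return the remaining count" part survives.
 */
static unsigned long old_copy_from_user_fixup(char *dst, unsigned long remaining,
					      int mode /* r29 */)
{
	if (mode == IS_COPY_FROM_USER_ZEROING) {	/* branch deleted by this patch */
		unsigned long i;

		for (i = 0; i < remaining; i++)		/* old byte-at-a-time zeroing */
			dst[i] = 0;
	}
	return remaining;				/* handed back to the caller in r0 */
}
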
@@ -51,7 +51,7 @@
 	__v; \
 	})
 
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
 #define LD8 LD
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
 #define ST1 ST
 #define ST2 ST
 #define ST4 ST
@@ -73,7 +73,7 @@
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
 #define LD4(p) _LD((p), ld4u)
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-				       unsigned long n)
-{
-	unsigned long rc = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}
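
The USERCOPY_FUNC hunks above all rely on the same preprocessor-template trick: the file defines USERCOPY_FUNC plus the STn/LDn load-and-store hooks and then #includes memcpy_64.c, so one copy-loop body is compiled several times, here yielding raw_copy_to_user(), raw_copy_from_user() and raw_copy_in_user(). A minimal sketch of the idea, with invented file and function names (in practice the template and its users live in separate files, and the real hooks use exception-table-protected user loads/stores):

/* --- copy_template.h: hypothetical stand-in for memcpy_64.c --- */
#ifdef USERCOPY_FUNC
unsigned long USERCOPY_FUNC(char *dst, const char *src, unsigned long n)
{
	unsigned long i;

	for (i = 0; i < n; i++)
		ST1(dst + i, LD1(src + i));	/* hooks differ per instantiation */
	return 0;				/* bytes not copied */
}
#undef USERCOPY_FUNC
#undef ST1
#undef LD1
#endif

/* --- template user: redefine the hooks and include the body again --- */
#define USERCOPY_FUNC demo_copy_to
#define ST1(p, v) (*(p) = (v))			/* real code: faulting user store */
#define LD1(p) (*(p))
#include "copy_template.h"

#define USERCOPY_FUNC demo_copy_from
#define ST1(p, v) (*(p) = (v))
#define LD1(p) (*(p))				/* real code: faulting user load */
#include "copy_template.h"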