Commit 2260ea86 authored by Al Viro

mips: switch to RAW_COPY_USER

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 1a4fded6
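
Background for the diff below: once an architecture selects ARCH_HAS_RAW_COPY_USER, the generic include/linux/uaccess.h builds copy_to_user()/copy_from_user() on top of the architecture's raw_copy_to_user()/raw_copy_from_user() primitives. The access_ok() checks, the might_fault() annotation, and the zero-padding of short reads then live in common code, which is why the large per-arch macros in this diff can simply be removed. A minimal sketch of the generic copy_from_user() path of that era (simplified for illustration, not the verbatim upstream code):

/*
 * Simplified sketch of the generic wrapper that RAW_COPY_USER enables,
 * modelled on include/linux/uaccess.h around v4.11 (not verbatim).
 * The architecture now only supplies raw_copy_from_user(); the checks
 * and the zeroing of the uncopied tail are common code.
 */
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* arch primitive */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* pad short copies */
	return res;
}
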
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -69,6 +69,7 @@ config MIPS
 	select HAVE_EXIT_THREAD
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select ARCH_HAS_RAW_COPY_USER
 
 menu "Machine selection"
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -882,257 +882,35 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 #endif /* CONFIG_EVA */
-/*
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_from, __cu_len, true); \
-	might_fault(); \
-	\
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
-						   __cu_len); \
-	else \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	__cu_len; \
-})
-#define __copy_to_user_inatomic(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_from, __cu_len, true); \
-	\
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
-						   __cu_len); \
-	else \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	__cu_len; \
-})
-#define __copy_from_user_inatomic(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_to, __cu_len, false); \
-	\
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
-						     __cu_len); \
-	else \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
-	__cu_len; \
-})
-/*
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_from, __cu_len, true); \
-	\
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, \
-						   __cu_from, \
-						   __cu_len); \
-	} else { \
-		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
-			might_fault(); \
-			__cu_len = __invoke_copy_to_user(__cu_to, \
-							 __cu_from, \
-							 __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
-/*
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_to, __cu_len, false); \
-	\
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_from_kernel(__cu_to, \
-						     __cu_from, \
-						     __cu_len); \
-	} else { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
-	} \
-	__cu_len; \
-})
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_to_kernel(to, from, n);
+	else
+		return __invoke_copy_to_user(to, from, n);
+}
-/*
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len, __cu_res; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_res = __cu_len = (n); \
-	\
-	check_object_size(__cu_to, __cu_len, false); \
-	\
-	if (eva_kernel_access()) { \
-		__cu_res = __invoke_copy_from_kernel(__cu_to, \
-						     __cu_from, \
-						     __cu_len); \
-	} else { \
-		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
-			might_fault(); \
-			__cu_res = __invoke_copy_from_user(__cu_to, \
-							   __cu_from, \
-							   __cu_len); \
-		} \
-	} \
-	if (unlikely(__cu_res)) \
-		memset(__cu_to + __cu_len - __cu_res, 0, __cu_res); \
-	__cu_res; \
-})
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_from_kernel(to, from, n);
+	else
+		return __invoke_copy_from_user(to, from, n);
+}
-#define __copy_in_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	if (eva_kernel_access()) { \
-		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
-						    __cu_len); \
-	} else { \
-		might_fault(); \
-		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
-						  __cu_len); \
-	} \
-	__cu_len; \
-})
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
-#define copy_in_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	if (eva_kernel_access()) { \
-		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
-						    __cu_len); \
-	} else { \
-		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
-			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
-			might_fault(); \
-			__cu_len = ___invoke_copy_in_user(__cu_to, \
-							  __cu_from, \
-							  __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return ___invoke_copy_in_kernel(to, from, n);
+	else
+		return ___invoke_copy_in_user(to, from, n);
+}
 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
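
A note on the two defines added above: INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER tell the generic header to emit copy_from_user()/copy_to_user() as inline functions in include/linux/uaccess.h rather than use the out-of-line versions from lib/usercopy.c. Either way the contract seen by callers is unchanged, including the zero-padding of the uncopied tail that the removed MIPS copy_from_user() macro used to do with its own memset(). A hypothetical caller for illustration (my_args and my_ioctl_handler are made-up names, not part of this commit):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct my_args {			/* hypothetical user/kernel ABI struct */
	int fd;
	long len;
};

static int my_ioctl_handler(void __user *uarg)
{
	struct my_args args;

	/* copy_from_user() returns the number of bytes NOT copied */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;	/* uncopied tail of args has been zeroed */
	/* ... use args ... */
	return 0;
}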