Commit 3fba7e23 authored by Russell King

ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()

Provide uaccess_save_and_enable() and uaccess_restore() to permit
control of userspace visibility to the kernel, and hook these into
the appropriate places in the kernel where we need to access
userspace.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 08446b12
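
The pattern this commit applies at every site that touches a __user pointer is a save/enable ... access ... restore bracket. A minimal sketch of the calling convention (illustrative only; example_write_user() is a hypothetical caller, and in this commit both hooks are no-op stubs, so the generated code is unchanged):

	static int example_write_user(u32 __user *uaddr, u32 val)
	{
		unsigned int __ua_flags;
		int ret;

		__ua_flags = uaccess_save_and_enable();	/* open the userspace "window" */
		ret = __put_user(val, uaddr);		/* the access itself */
		uaccess_restore(__ua_flags);		/* close it again */
		return ret;
	}

Because uaccess_restore() takes the value returned by the matching uaccess_save_and_enable(), the brackets nest safely; __put_user() already brackets itself after this commit, so the outer pair here is redundant but harmless.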
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP

 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({									\
+	unsigned int __ua_flags;					\
 	smp_mb();							\
 	prefetchw(uaddr);						\
+	__ua_flags = uaccess_save_and_enable();				\
 	__asm__ __volatile__(						\
 	"1:	ldrex	%1, [%3]\n"					\
 	"	" insn "\n"						\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")					\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)			\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)			\
-	: "cc", "memory")
+	: "cc", "memory");						\
+	uaccess_restore(__ua_flags);					\
+})

 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();

 	*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>

 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({									\
+	unsigned int __ua_flags = uaccess_save_and_enable();		\
 	__asm__ __volatile__(						\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"				\
 	"	" insn "\n"						\
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	__futex_atomic_ex_table("%5")					\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)			\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)			\
-	: "cc", "memory")
+	: "cc", "memory");						\
+	uaccess_restore(__ua_flags);					\
+})

 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;

 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);

 	*uval = val;
 	preempt_enable();
...
@@ -49,6 +49,21 @@ struct exception_table_entry
 extern int fixup_exception(struct pt_regs *regs);

+/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel can not inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+	return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
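
The stubs above keep this commit a pure no-op on existing configurations: save returns 0 and restore ignores its argument. A hypothetical non-stub variant (not part of this commit; get_hw_uaccess_reg(), set_hw_uaccess_reg() and HW_UACCESS_ENABLE are invented names standing in for an architecture-specific access-control register) shows the shape the hooks are designed for:

	static inline unsigned int uaccess_save_and_enable(void)
	{
		unsigned int old = get_hw_uaccess_reg();	/* save current state */

		set_hw_uaccess_reg(old | HW_UACCESS_ENABLE);	/* permit kernel->user access */
		return old;				/* caller hands this to restore */
	}

	static inline void uaccess_restore(unsigned int flags)
	{
		set_hw_uaccess_reg(flags);	/* put the saved state back */
	}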
@@ -165,6 +180,7 @@ extern int __get_user_64t_4(void *);
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +208,7 @@ extern int __get_user_64t_4(void *);
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
@@ -224,6 +241,7 @@ extern int __put_user_8(void *, unsigned long long);
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +257,7 @@ extern int __put_user_8(void *, unsigned long long);
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 	})
@@ -300,14 +319,17 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2: __get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4: __get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
@@ -381,9 +403,11 @@ do {									\
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,6 +415,7 @@ do {									\
 	case 8: __put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)

 #define __put_user_asm_byte(x, __pu_addr, err)				\
@@ -474,11 +499,46 @@ do {									\
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +571,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 	return n;
 }

+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);

 extern __must_check long strlen_user(const char __user *str);
...
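
Only the out-of-line implementations are renamed to arm_*; __copy_from_user(), __copy_to_user() and __clear_user() survive as inline wrappers, so callers throughout the tree compile unchanged while still getting the enable/restore bracket. A usage sketch (demo_args and demo_ioctl are hypothetical; copy_from_user()/copy_to_user() are the standard kernel API, which on ARM funnels into the wrappers above):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct demo_args {
		u32 in;
		u32 out;
	};

	static long demo_ioctl(void __user *argp)
	{
		struct demo_args args;

		/* copy_from_user() -> __copy_from_user() -> arm_copy_from_user(),
		 * now bracketed by uaccess_save_and_enable()/uaccess_restore() */
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;

		args.out = args.in + 1;

		if (copy_to_user(argp, &args, sizeof(args)))
			return -EFAULT;

		return 0;
	}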
@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);

-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);

 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
...
@@ -12,14 +12,14 @@
 		.text

-/* Prototype: int    __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)

 		.pushsection .text.fixup,"ax"
...
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 	.text

-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)

 #include "copy_template.S"

-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)

 	.pushsection .fixup,"ax"
 	.align 0
...
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text

 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)

 #include "copy_template.S"

-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)

 	.pushsection .text.fixup,"ax"
...
@@ -136,7 +136,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 }

 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 	return n;
 }

-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rationale for this in __copy_to_user() above. */
 	if (n < 64)
...