Commit 3ee1afa3 authored by Nick Piggin, committed by Ingo Molnar

x86: some lock annotations for user copy paths, v2

 - introduce might_fault()
 - handle the atomic user copy paths correctly

[ mingo@elte.hu: move might_sleep() outside of in_atomic(). ]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c10d38dd
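For context, this is the class of bug the annotation is designed to catch: a user copy performed while mmap_sem is already held, which can deadlock if the copy faults and the fault handler tries to take mmap_sem again. A hypothetical offender (illustrative only, not part of this commit; the function name is invented):

	static int broken_ioctl(void __user *arg)
	{
		char buf[64];
		int err = 0;

		down_write(&current->mm->mmap_sem);
		/* A user copy may fault and take mmap_sem for reading.
		 * With these annotations, lockdep flags this pattern
		 * immediately, even if no fault actually occurs at
		 * runtime on this particular call. */
		if (copy_from_user(buf, arg, sizeof(buf)))
			err = -EFAULT;
		up_write(&current->mm->mmap_sem);
		return err;
	}

The diff below threads the annotation through the x86 user-copy paths.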
@@ -32,9 +32,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #define __do_strncpy_from_user(dst, src, count, res)		\
 do {								\
 	int __d0, __d1, __d2;					\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	__asm__ __volatile__(					\
 	"	testl %1,%1\n"					\
 	"	jz 2f\n"					\
@@ -121,9 +119,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 #define __do_clear_user(addr,size)				\
 do {								\
 	int __d0;						\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	__asm__ __volatile__(					\
 	"0:	rep; stosl\n"					\
 	"	movl %2,%0\n"					\
@@ -193,9 +189,7 @@ long strnlen_user(const char __user *s, long n)
 	unsigned long mask = -__addr_ok(s);
 	unsigned long res, tmp;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	__asm__ __volatile__(
 	"	testl %0, %0\n"
...
@@ -15,9 +15,7 @@
 #define __do_strncpy_from_user(dst,src,count,res)		\
 do {								\
 	long __d0, __d1, __d2;					\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	__asm__ __volatile__(					\
 	"	testq %1,%1\n"					\
 	"	jz 2f\n"					\
@@ -66,9 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 unsigned long __clear_user(void __user *addr, unsigned long size)
 {
 	long __d0;
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(
...
@@ -8,8 +8,6 @@
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
 #include <linux/string.h>
-#include <linux/lockdep.h>
-#include <linux/sched.h>
 #include <asm/asm.h>
 #include <asm/page.h>
@@ -159,9 +157,7 @@ extern int __get_user_bad(void);
 	int __ret_gu;						\
 	unsigned long __val_gu;					\
 	__chk_user_ptr(ptr);					\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
 		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
@@ -246,9 +242,7 @@ extern void __put_user_8(void);
 	int __ret_pu;						\
 	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	__pu_val = x;						\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
@@ -273,9 +267,7 @@ extern void __put_user_8(void);
 #define __put_user_size(x, ptr, size, retval, errret)		\
 do {								\
 	retval = 0;						\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	__chk_user_ptr(ptr);					\
 	switch (size) {						\
 	case 1:							\
@@ -328,9 +320,7 @@ do { \
 #define __get_user_size(x, ptr, size, retval, errret)		\
 do {								\
 	retval = 0;						\
-	might_sleep();						\
-	if (current->mm)					\
-		might_lock_read(&current->mm->mmap_sem);	\
+	might_fault();						\
 	__chk_user_ptr(ptr);					\
 	switch (size) {						\
 	case 1:							\
...
@@ -82,9 +82,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	return __copy_to_user_inatomic(to, from, n);
 }
@@ -139,9 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
@@ -163,9 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
...
@@ -29,9 +29,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -75,9 +73,7 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -121,9 +117,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
...
@@ -140,6 +140,15 @@ extern int _cond_resched(void);
 		(__x < 0) ? -__x : __x;		\
 	})
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+	might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
...
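The header keeps two variants: with CONFIG_PROVE_LOCKING the out-of-line might_fault() added in mm/memory.c below is used; otherwise the helper collapses back to a plain might_sleep() check, so non-lockdep builds pay nothing extra. At each call site the change is purely mechanical, schematically (condensed from the hunks above):

	/* Before: open-coded at every user-copy path. */
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);

	/* After: one helper; the lockdep variant can additionally
	 * skip the lock annotation on atomic copy paths, see
	 * might_fault() below. */
	might_fault();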
@@ -3016,3 +3016,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	}
 	up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+	might_sleep();
+	/*
+	 * it would be nicer only to annotate paths which are not under
+	 * pagefault_disable, however that requires a larger audit and
+	 * providing helpers like get_user_atomic.
+	 */
+	if (!in_atomic() && current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
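The in_atomic() test is what handles the atomic user-copy paths mentioned in the changelog: they run with page faults disabled, so a fault fails fast instead of sleeping on mmap_sem, and the lock annotation must not fire for them. A hypothetical caller showing the exempted pattern (the function name is invented for illustration):

	static unsigned long copy_chunk_atomic(void *dst,
					       const void __user *src,
					       unsigned long len)
	{
		unsigned long uncopied;

		pagefault_disable();	/* raises the preempt count, so in_atomic() is true */
		uncopied = __copy_from_user_inatomic(dst, src, len);
		pagefault_enable();

		/* Non-zero means a fault was suppressed; the caller must
		 * fall back to a sleeping copy outside the atomic section. */
		return uncopied;
	}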