Commit 594cc251 authored by Linus Torvalds

make 'user_access_begin()' do 'access_ok()'

Originally, the rule used to be that you'd have to do access_ok()
separately, and then user_access_begin() before actually doing the
direct (optimized) user access.

But experience has shown that people then decide not to do access_ok()
at all, and instead rely on it being implied by other operations or
similar.  Which makes it very hard to verify that the access has
actually been range-checked.

If you use the unsafe direct user accesses, hardware features (either
SMAP - Supervisor Mode Access Protection - on x86, or PAN - Privileged
Access Never - on ARM) do force you to use user_access_begin().  But
nothing really forces the range check.

By putting the range check into user_access_begin(), we actually force
people to do the right thing (tm), and the range check will be visible
near the actual accesses.  We have way too long a history of people
trying to avoid them.
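
For illustration only (not part of the patch): a minimal sketch of the calling
convention this enforces.  The struct and function names below are made up;
the user_access_begin()/unsafe_put_user()/user_access_end() pattern is the one
the callers in this commit are converted to.

	/*
	 * Hypothetical caller: user_access_begin() now does the access_ok()
	 * range check itself and returns false on failure, so no separate
	 * access_ok() call is needed before the unsafe accessors.
	 */
	static int set_user_flag(struct foo __user *ufoo)
	{
		if (!user_access_begin(ufoo, sizeof(*ufoo)))
			return -EFAULT;
		unsafe_put_user(1, &ufoo->flag, Efault);
		user_access_end();
		return 0;
	Efault:
		user_access_end();
		return -EFAULT;
	}
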
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0b2c8f8b
@@ -705,7 +705,14 @@ extern struct movsl_mask {
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-#define user_access_begin()	__uaccess_begin()
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr,len)))
+		return 0;
+	__uaccess_begin();
+	return 1;
+}
+#define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	__uaccess_end()
 #define unsafe_put_user(x, ptr, err_label)	\
...
@@ -1624,7 +1624,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
 		 * happened we would make the mistake of assuming that the
 		 * relocations were valid.
 		 */
-		user_access_begin();
+		if (!user_access_begin(urelocs, size))
+			goto end_user;
+
 		for (copied = 0; copied < nreloc; copied++)
 			unsafe_put_user(-1,
 					&urelocs[copied].presumed_offset,
@@ -2606,7 +2608,16 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		unsigned int i;
 
 		/* Copy the new buffer offsets back to the user's exec list. */
-		user_access_begin();
+		/*
+		 * Note: count * sizeof(*user_exec_list) does not overflow,
+		 * because we checked 'count' in check_buffer_count().
+		 *
+		 * And this range already got effectively checked earlier
+		 * when we did the "copy_from_user()" above.
+		 */
+		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+			goto end_user;
+
 		for (i = 0; i < args->buffer_count; i++) {
 			if (!(exec2_list[i].offset & UPDATE))
 				continue;
...
@@ -264,7 +264,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 	probe_kernel_read(&retval, addr, sizeof(retval))
 
 #ifndef user_access_begin
-#define user_access_begin() do { } while (0)
+#define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
 #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
...
@@ -354,10 +354,9 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
 
-	if (!access_ok(umask, bitmap_size / 8))
+	if (!user_access_begin(umask, bitmap_size / 8))
 		return -EFAULT;
 
-	user_access_begin();
 	while (nr_compat_longs > 1) {
 		compat_ulong_t l1, l2;
 		unsafe_get_user(l1, umask++, Efault);
@@ -384,10 +383,9 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
 
-	if (!access_ok(umask, bitmap_size / 8))
+	if (!user_access_begin(umask, bitmap_size / 8))
 		return -EFAULT;
 
-	user_access_begin();
 	while (nr_compat_longs > 1) {
 		unsigned long m = *mask++;
 		unsafe_put_user((compat_ulong_t)m, umask++, Efault);
...
@@ -1604,10 +1604,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
 	if (!infop)
 		return err;
 
-	if (!access_ok(infop, sizeof(*infop)))
+	if (!user_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;
 
-	user_access_begin();
 	unsafe_put_user(signo, &infop->si_signo, Efault);
 	unsafe_put_user(0, &infop->si_errno, Efault);
 	unsafe_put_user(info.cause, &infop->si_code, Efault);
@@ -1732,10 +1731,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 	if (!infop)
 		return err;
 
-	if (!access_ok(infop, sizeof(*infop)))
+	if (!user_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;
 
-	user_access_begin();
 	unsafe_put_user(signo, &infop->si_signo, Efault);
 	unsafe_put_user(0, &infop->si_errno, Efault);
 	unsafe_put_user(info.cause, &infop->si_code, Efault);
...
@@ -114,11 +114,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 		kasan_check_write(dst, count);
 		check_object_size(dst, count, false);
-		user_access_begin();
-		retval = do_strncpy_from_user(dst, src, count, max);
-		user_access_end();
-		return retval;
+		if (user_access_begin(src, max)) {
+			retval = do_strncpy_from_user(dst, src, count, max);
+			user_access_end();
+			return retval;
+		}
 	}
 
 	return -EFAULT;
 }
 EXPORT_SYMBOL(strncpy_from_user);
@@ -114,11 +114,12 @@ long strnlen_user(const char __user *str, long count)
 		unsigned long max = max_addr - src_addr;
 		long retval;
-		user_access_begin();
-		retval = do_strnlen_user(str, count, max);
-		user_access_end();
-		return retval;
+		if (user_access_begin(str, max)) {
+			retval = do_strnlen_user(str, count, max);
+			user_access_end();
+			return retval;
+		}
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL(strnlen_user);