Commit beba3a20 authored Mar 25, 2017 by Al Viro
x86: switch to RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a41e0d75
Showing 6 changed files with 24 additions and 395 deletions
arch/x86/Kconfig                    +1   -0
arch/x86/include/asm/uaccess.h      +0   -53
arch/x86/include/asm/uaccess_32.h   +15  -80
arch/x86/include/asm/uaccess_64.h   +5   -40
arch/x86/lib/usercopy.c             +1   -54
arch/x86/lib/usercopy_32.c          +2   -168
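Note (not part of the commit): selecting ARCH_HAS_RAW_COPY_USER means the architecture now supplies only raw_copy_to_user()/raw_copy_from_user() (plus raw_copy_in_user() on 64-bit), each returning the number of bytes it could not copy, while access_ok(), the hardened-usercopy object-size checks and the zero-padding of a partially copied destination all move into the generic uaccess code. That is why the x86 wrappers below can simply be deleted, and why the 32-bit fast path switches from __get_user_size() to the non-zeroing __get_user_asm_nozero(). A rough sketch of the generic caller's side, simplified from the common code of that era rather than quoted from this commit:

/* Sketch only: approximate shape of the generic _copy_from_user() that
 * replaces the deleted x86 version.  raw_copy_from_user() is the arch
 * hook this commit provides; it must not zero the destination itself,
 * because the tail padding is done here, once, for every architecture.
 */
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))	/* three-argument access_ok() of that era */
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* zero-pad the uncopied tail */
	return res;
}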
arch/x86/Kconfig
@@ -175,6 +175,7 @@ config X86
 	select USER_STACKTRACE_SUPPORT
 	select VIRT_TO_BUS
 	select X86_FEATURE_NAMES if PROC_FS
+	select ARCH_HAS_RAW_COPY_USER
 
 config INSTRUCTION_DECODER
 	def_bool y
arch/x86/include/asm/uaccess.h
@@ -682,59 +682,6 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-					   unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-					 unsigned n);
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	might_fault();
-	kasan_check_write(to, n);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
-		n = _copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(from);
-
-	kasan_check_read(from, n);
-	might_fault();
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
-		n = _copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
arch/x86/include/asm/uaccess_32.h
@@ -8,113 +8,48 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-unsigned long __must_check __copy_to_user_ll
-		(void __user *to, const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll
-		(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero
-		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_user_ll
+		(void *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 		(void *to, const void __user *from, unsigned long n);
 
-/**
- * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- * The caller should also make sure he pins the user space address
- * so that we don't result in page fault and sleep.
- */
 static __always_inline unsigned long __must_check
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	check_object_size(from, n, true);
-	return __copy_to_user_ll(to, from, n);
+	return __copy_user_ll((__force void *)to, from, n);
 }
 
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
-	return __copy_from_user_ll_nozero(to, from, n);
-}
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep.  In this case the
- * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
- * for explanation of why this is needed.
- */
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_fault();
-	check_object_size(to, n, false);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
 		switch (n) {
 		case 1:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__get_user_asm_nozero(*(u8 *)to, from, ret,
+					      "b", "b", "=q", 1);
 			__uaccess_end();
 			return ret;
 		case 2:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__get_user_asm_nozero(*(u16 *)to, from, ret,
+					      "w", "w", "=r", 2);
 			__uaccess_end();
 			return ret;
 		case 4:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__get_user_asm_nozero(*(u32 *)to, from, ret,
+					      "l", "k", "=r", 4);
 			__uaccess_end();
 			return ret;
 		}
 	}
-	return __copy_from_user_ll(to, from, n);
+	return __copy_user_ll(to, (__force const void *)from, n);
 }
 
 static __always_inline unsigned long
arch/x86/include/asm/uaccess_64.h
@@ -45,15 +45,11 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	return ret;
 }
 
-__must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
-
-static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
 	int ret = 0;
 
-	check_object_size(dst, size, false);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -106,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 	}
 }
 
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
 {
 	int ret = 0;
 
-	check_object_size(src, size, true);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -175,34 +162,12 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 	}
 }
 
-static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
-	might_fault();
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
 static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
 {
 	return copy_user_generic((__force void *)dst,
 				 (__force void *)src, size);
 }
 
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
-{
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
-{
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
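Aside (not part of the diff): copy_in_user(), whose x86 declaration and __copy_in_user() helper disappear above, is likewise supplied by generic code on top of the new raw_copy_in_user() hook; roughly, assuming the three-argument access_ok() used by 2017 kernels:

/* Sketch of the generic copy_in_user() built on raw_copy_in_user(). */
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}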
arch/x86/lib/usercopy.c
@@ -4,12 +4,9 @@
  *  For licencing details see kernel-base/COPYING
  */
 
-#include <linux/highmem.h>
+#include <linux/uaccess.h>
 #include <linux/export.h>
-
-#include <asm/word-at-a-time.h>
-#include <linux/sched.h>
 
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -34,53 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
-	unsigned long res = n;
-	if (access_ok(VERIFY_READ, from, n))
-		res = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(res))
-		memset(to + n - res, 0, res);
-	return res;
-}
-EXPORT_SYMBOL(_copy_from_user);
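For comparison (again not part of this commit): the _copy_to_user() export deleted here reappears in generic code, sitting directly on the raw_copy_to_user() hook defined in the headers above; approximately:

/* Sketch: generic replacement for the deleted x86 _copy_to_user(). */
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		n = raw_copy_to_user(to, from, n);
	return n;
}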
arch/x86/lib/usercopy_32.c
@@ -5,12 +5,7 @@
  * Copyright 1997 Andi Kleen <ak@muc.de>
  * Copyright 1997 Linus Torvalds
  */
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/blkdev.h>
 #include <linux/export.h>
-#include <linux/backing-dev.h>
-#include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <asm/mmx.h>
 #include <asm/asm.h>
@@ -201,98 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
 	return size;
 }
 
-static unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
-{
-	int d0, d1;
-	__asm__ __volatile__(
-		       "       .align 2,0x90\n"
-		       "0:     movl 32(%4), %%eax\n"
-		       "       cmpl $67, %0\n"
-		       "       jbe 2f\n"
-		       "1:     movl 64(%4), %%eax\n"
-		       "       .align 2,0x90\n"
-		       "2:     movl 0(%4), %%eax\n"
-		       "21:    movl 4(%4), %%edx\n"
-		       "       movl %%eax, 0(%3)\n"
-		       "       movl %%edx, 4(%3)\n"
-		       "3:     movl 8(%4), %%eax\n"
-		       "31:    movl 12(%4),%%edx\n"
-		       "       movl %%eax, 8(%3)\n"
-		       "       movl %%edx, 12(%3)\n"
-		       "4:     movl 16(%4), %%eax\n"
-		       "41:    movl 20(%4), %%edx\n"
-		       "       movl %%eax, 16(%3)\n"
-		       "       movl %%edx, 20(%3)\n"
-		       "10:    movl 24(%4), %%eax\n"
-		       "51:    movl 28(%4), %%edx\n"
-		       "       movl %%eax, 24(%3)\n"
-		       "       movl %%edx, 28(%3)\n"
-		       "11:    movl 32(%4), %%eax\n"
-		       "61:    movl 36(%4), %%edx\n"
-		       "       movl %%eax, 32(%3)\n"
-		       "       movl %%edx, 36(%3)\n"
-		       "12:    movl 40(%4), %%eax\n"
-		       "71:    movl 44(%4), %%edx\n"
-		       "       movl %%eax, 40(%3)\n"
-		       "       movl %%edx, 44(%3)\n"
-		       "13:    movl 48(%4), %%eax\n"
-		       "81:    movl 52(%4), %%edx\n"
-		       "       movl %%eax, 48(%3)\n"
-		       "       movl %%edx, 52(%3)\n"
-		       "14:    movl 56(%4), %%eax\n"
-		       "91:    movl 60(%4), %%edx\n"
-		       "       movl %%eax, 56(%3)\n"
-		       "       movl %%edx, 60(%3)\n"
-		       "       addl $-64, %0\n"
-		       "       addl $64, %4\n"
-		       "       addl $64, %3\n"
-		       "       cmpl $63, %0\n"
-		       "       ja  0b\n"
-		       "5:     movl  %0, %%eax\n"
-		       "       shrl  $2, %0\n"
-		       "       andl $3, %%eax\n"
-		       "       cld\n"
-		       "6:     rep; movsl\n"
-		       "       movl %%eax,%0\n"
-		       "7:     rep; movsb\n"
-		       "8:\n"
-		       ".section .fixup,\"ax\"\n"
-		       "9:     lea 0(%%eax,%0,4),%0\n"
-		       "16:    pushl %0\n"
-		       "       pushl %%eax\n"
-		       "       xorl %%eax,%%eax\n"
-		       "       rep; stosb\n"
-		       "       popl %%eax\n"
-		       "       popl %0\n"
-		       "       jmp 8b\n"
-		       ".previous\n"
-		       _ASM_EXTABLE(0b,16b)
-		       _ASM_EXTABLE(1b,16b)
-		       _ASM_EXTABLE(2b,16b)
-		       _ASM_EXTABLE(21b,16b)
-		       _ASM_EXTABLE(3b,16b)
-		       _ASM_EXTABLE(31b,16b)
-		       _ASM_EXTABLE(4b,16b)
-		       _ASM_EXTABLE(41b,16b)
-		       _ASM_EXTABLE(10b,16b)
-		       _ASM_EXTABLE(51b,16b)
-		       _ASM_EXTABLE(11b,16b)
-		       _ASM_EXTABLE(61b,16b)
-		       _ASM_EXTABLE(12b,16b)
-		       _ASM_EXTABLE(71b,16b)
-		       _ASM_EXTABLE(13b,16b)
-		       _ASM_EXTABLE(81b,16b)
-		       _ASM_EXTABLE(14b,16b)
-		       _ASM_EXTABLE(91b,16b)
-		       _ASM_EXTABLE(6b,9b)
-		       _ASM_EXTABLE(7b,16b)
-		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
-		       : "1"(to), "2"(from), "0"(size)
-		       : "eax", "edx", "memory");
-	return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
@@ -387,8 +290,6 @@ static unsigned long __copy_user_intel_nocache(void *to,
  * Leave these declared but undefined.  They should not be any references to
  * them
  */
-unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
-					unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
 					unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
@@ -427,47 +328,7 @@ do { \
 		: "memory");						\
 } while (0)
 
-#define __copy_user_zeroing(to, from, size)				\
-do {									\
-	int __d0, __d1, __d2;						\
-	__asm__ __volatile__(						\
-		"	cmp  $7,%0\n"					\
-		"	jbe  1f\n"					\
-		"	movl %1,%0\n"					\
-		"	negl %0\n"					\
-		"	andl $7,%0\n"					\
-		"	subl %0,%3\n"					\
-		"4:	rep; movsb\n"					\
-		"	movl %3,%0\n"					\
-		"	shrl $2,%0\n"					\
-		"	andl $3,%3\n"					\
-		"	.align 2,0x90\n"				\
-		"0:	rep; movsl\n"					\
-		"	movl %3,%0\n"					\
-		"1:	rep; movsb\n"					\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"5:	addl %3,%0\n"					\
-		"	jmp 6f\n"					\
-		"3:	lea 0(%3,%0,4),%0\n"				\
-		"6:	pushl %0\n"					\
-		"	pushl %%eax\n"					\
-		"	xorl %%eax,%%eax\n"				\
-		"	rep; stosb\n"					\
-		"	popl %%eax\n"					\
-		"	popl %0\n"					\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(4b,5b)					\
-		_ASM_EXTABLE(0b,3b)					\
-		_ASM_EXTABLE(1b,6b)					\
-		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
-		: "3"(size), "0"(size), "1"(to), "2"(from)		\
-		: "memory");						\
-} while (0)
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from,
-				unsigned long n)
+unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
 	stac();
 	if (movsl_is_ok(to, from, n))
@@ -477,34 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 	clac();
 	return n;
 }
-EXPORT_SYMBOL(__copy_to_user_ll);
-
-unsigned long __copy_from_user_ll(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-	if (movsl_is_ok(to, from, n))
-		__copy_user_zeroing(to, from, n);
-	else
-		n = __copy_user_zeroing_intel(to, from, n);
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll);
-
-unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
-					 unsigned long n)
-{
-	stac();
-	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
-	else
-		n = __copy_user_intel((void __user *)to,
-				      (const void *)from, n);
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+EXPORT_SYMBOL(__copy_user_ll);
 
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)