Commit 3f763453, authored Mar 25, 2017 by Al Viro
kill __copy_from_user_nocache()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Parent: 122b05dd
Showing 5 changed files with 2 additions and 164 deletions (+2 -164)
arch/x86/include/asm/uaccess_32.h  +0 -30
arch/x86/include/asm/uaccess_64.h  +0 -8
arch/x86/lib/usercopy_32.c         +0 -118
include/linux/uaccess.h            +0 -6
lib/iov_iter.c                     +2 -2
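The change is mechanical: every definition of __copy_from_user_nocache() (x86-32, x86-64, and the generic fallback) is deleted, and the only in-tree callers, both in lib/iov_iter.c, are switched to the surviving __copy_from_user_inatomic_nocache(). A hedged sketch of the call-site migration follows; the helper name fill_nocache and its arguments are illustrative, not from the commit.

#include <linux/uaccess.h>

/*
 * Illustrative helper, not from the commit: shows the call-site change
 * this commit performs in lib/iov_iter.c.
 */
static unsigned long fill_nocache(void *dst, const void __user *src,
				  unsigned long len)
{
	/* Before: return __copy_from_user_nocache(dst, src, len); */
	/* After: the _inatomic_ variant, which skips might_fault() and
	 * leaves access checking and fault-in to the caller. */
	return __copy_from_user_inatomic_nocache(dst, src, len);
}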
arch/x86/include/asm/uaccess_32.h

@@ -14,8 +14,6 @@ unsigned long __must_check __copy_from_user_ll
 		(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nozero
 		(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache
-		(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 		(void *to, const void __user *from, unsigned long n);
 

@@ -119,34 +117,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static __always_inline unsigned long
-__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			__uaccess_end();
-			return ret;
-		}
-	}
-	return __copy_from_user_ll_nocache(to, from, n);
-}
-
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 				  unsigned long n)
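The 32-bit wrapper removed above carried a compile-time fast path: for a constant n of 1, 2, or 4 bytes it inlined a single __get_user_size() access instead of calling out to __copy_from_user_ll_nocache(), since a non-temporal hint buys nothing for such small copies. Below is a hedged, userspace-only sketch of that __builtin_constant_p() specialization idiom; the name copy_small is illustrative.

#include <stdint.h>
#include <string.h>

/*
 * Userspace illustration of the idiom used by the deleted wrapper:
 * __builtin_constant_p(n) is true only when the compiler can prove n is
 * a compile-time constant, letting small fixed-size copies collapse into
 * a single typed load/store (mirroring the kernel's __get_user_size)
 * instead of a call into the generic copy routine.
 */
static inline void copy_small(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: *(uint8_t *)to  = *(const uint8_t *)from;  return;
		case 2: *(uint16_t *)to = *(const uint16_t *)from; return;
		case 4: *(uint32_t *)to = *(const uint32_t *)from; return;
		}
	}
	memcpy(to, from, n);	/* generic path for everything else */
}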
arch/x86/include/asm/uaccess_64.h

@@ -260,14 +260,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_user_nocache(dst, src, size, 1);
-}
-
 static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
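On x86-64 the deleted wrapper was just __copy_user_nocache() with zerorest == 1, i.e. "zero the uncopied tail of the destination on a fault", matching __copy_from_user() semantics; the surviving _inatomic_ variant, whose body is not shown in this hunk, is presumed to pass 0 and leave the tail untouched. A hedged sketch of what that flag amounts to, with an illustrative helper name:

#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: emulates zerorest == 1 on top of a non-zeroing
 * nocache copy. Per the declaration above, __copy_user_nocache()
 * returns the number of bytes left uncopied on a fault, 0 on success.
 */
static long copy_nocache_zero_tail(void *dst, const void __user *src,
				   unsigned size)
{
	long left = __copy_user_nocache(dst, src, size, 0);

	if (left)	/* fault: zero what was never written */
		memset((char *)dst + (size - left), 0, left);
	return left;
}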
arch/x86/lib/usercopy_32.c

@@ -293,105 +293,6 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 	return size;
 }
 
-/*
- * Non Temporal Hint version of __copy_user_zeroing_intel.  It is cache aware.
- * hyoshiok@miraclelinux.com
- */
-
-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-				const void __user *from, unsigned long size)
-{
-	int d0, d1;
-
-	__asm__ __volatile__(
-	       "        .align 2,0x90\n"
-	       "0:      movl 32(%4), %%eax\n"
-	       "        cmpl $67, %0\n"
-	       "        jbe 2f\n"
-	       "1:      movl 64(%4), %%eax\n"
-	       "        .align 2,0x90\n"
-	       "2:      movl 0(%4), %%eax\n"
-	       "21:     movl 4(%4), %%edx\n"
-	       "        movnti %%eax, 0(%3)\n"
-	       "        movnti %%edx, 4(%3)\n"
-	       "3:      movl 8(%4), %%eax\n"
-	       "31:     movl 12(%4),%%edx\n"
-	       "        movnti %%eax, 8(%3)\n"
-	       "        movnti %%edx, 12(%3)\n"
-	       "4:      movl 16(%4), %%eax\n"
-	       "41:     movl 20(%4), %%edx\n"
-	       "        movnti %%eax, 16(%3)\n"
-	       "        movnti %%edx, 20(%3)\n"
-	       "10:     movl 24(%4), %%eax\n"
-	       "51:     movl 28(%4), %%edx\n"
-	       "        movnti %%eax, 24(%3)\n"
-	       "        movnti %%edx, 28(%3)\n"
-	       "11:     movl 32(%4), %%eax\n"
-	       "61:     movl 36(%4), %%edx\n"
-	       "        movnti %%eax, 32(%3)\n"
-	       "        movnti %%edx, 36(%3)\n"
-	       "12:     movl 40(%4), %%eax\n"
-	       "71:     movl 44(%4), %%edx\n"
-	       "        movnti %%eax, 40(%3)\n"
-	       "        movnti %%edx, 44(%3)\n"
-	       "13:     movl 48(%4), %%eax\n"
-	       "81:     movl 52(%4), %%edx\n"
-	       "        movnti %%eax, 48(%3)\n"
-	       "        movnti %%edx, 52(%3)\n"
-	       "14:     movl 56(%4), %%eax\n"
-	       "91:     movl 60(%4), %%edx\n"
-	       "        movnti %%eax, 56(%3)\n"
-	       "        movnti %%edx, 60(%3)\n"
-	       "        addl $-64, %0\n"
-	       "        addl $64, %4\n"
-	       "        addl $64, %3\n"
-	       "        cmpl $63, %0\n"
-	       "        ja  0b\n"
-	       "        sfence \n"
-	       "5:      movl  %0, %%eax\n"
-	       "        shrl  $2, %0\n"
-	       "        andl $3, %%eax\n"
-	       "        cld\n"
-	       "6:      rep; movsl\n"
-	       "        movl %%eax,%0\n"
-	       "7:      rep; movsb\n"
-	       "8:\n"
-	       ".section .fixup,\"ax\"\n"
-	       "9:      lea 0(%%eax,%0,4),%0\n"
-	       "16:     pushl %0\n"
-	       "        pushl %%eax\n"
-	       "        xorl %%eax,%%eax\n"
-	       "        rep; stosb\n"
-	       "        popl %%eax\n"
-	       "        popl %0\n"
-	       "        jmp 8b\n"
-	       ".previous\n"
-	       _ASM_EXTABLE(0b,16b)
-	       _ASM_EXTABLE(1b,16b)
-	       _ASM_EXTABLE(2b,16b)
-	       _ASM_EXTABLE(21b,16b)
-	       _ASM_EXTABLE(3b,16b)
-	       _ASM_EXTABLE(31b,16b)
-	       _ASM_EXTABLE(4b,16b)
-	       _ASM_EXTABLE(41b,16b)
-	       _ASM_EXTABLE(10b,16b)
-	       _ASM_EXTABLE(51b,16b)
-	       _ASM_EXTABLE(11b,16b)
-	       _ASM_EXTABLE(61b,16b)
-	       _ASM_EXTABLE(12b,16b)
-	       _ASM_EXTABLE(71b,16b)
-	       _ASM_EXTABLE(13b,16b)
-	       _ASM_EXTABLE(81b,16b)
-	       _ASM_EXTABLE(14b,16b)
-	       _ASM_EXTABLE(91b,16b)
-	       _ASM_EXTABLE(6b,9b)
-	       _ASM_EXTABLE(7b,16b)
-	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
-	       :  "1"(to), "2"(from), "0"(size)
-	       : "eax", "edx", "memory");
-	return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {

@@ -490,8 +391,6 @@ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
 					unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
 					unsigned long size);
-unsigned long __copy_user_zeroing_intel_nocache(void *to,
-				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */

@@ -607,23 +506,6 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 
-unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-#ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
-		n = __copy_user_zeroing_intel_nocache(to, from, n);
-	else
-		__copy_user_zeroing(to, from, n);
-#else
-	__copy_user_zeroing(to, from, n);
-#endif
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nocache);
-
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
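Most of the deletion is the hand-rolled SSE2 loop above: movnti performs non-temporal stores that bypass the cache (useful when the destination will not be read back soon), and a single sfence after the loop orders those stores before subsequent ordinary stores. A hedged userspace analogue of one 64-byte block using the SSE2 intrinsics; the function name is illustrative.

#include <emmintrin.h>	/* SSE2: _mm_stream_si32(), _mm_sfence() */

/*
 * Userspace analogue of the deleted loop body: copy one 64-byte block
 * with non-temporal (cache-bypassing) stores. The kernel loop fenced
 * once after ALL blocks; fencing per call here keeps the sketch simple.
 * dst should be 4-byte aligned for the non-temporal hint to pay off.
 */
static void copy_block_nocache(int *dst, const int *src)
{
	int i;

	for (i = 0; i < 16; i++)			/* 16 * 4 = 64 bytes */
		_mm_stream_si32(&dst[i], src[i]);	/* compiles to movnti */
	_mm_sfence();	/* order NT stores before later ordinary stores */
}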
include/linux/uaccess.h

@@ -261,12 +261,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 	return __copy_from_user_inatomic(to, from, n);
 }
 
-static inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	return __copy_from_user(to, from, n);
-}
-
 #endif		/* ARCH_HAS_NOCACHE_UACCESS */
 
 /*
lib/iov_iter.c

@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),

@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
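Since both call sites sit inside the iov_iter helpers, external consumers are untouched: they keep calling copy_from_iter_nocache() and copy_from_iter_full_nocache(), and only the user-copy primitive changes underneath. A hedged usage sketch; the pmem-flavoured names are illustrative, not from this commit.

#include <linux/errno.h>
#include <linux/uio.h>

/*
 * Illustrative consumer of the iterator API patched above; persistent-
 * memory style drivers are typical users of the nocache variants.
 */
static ssize_t pmem_write(void *pmem_dst, size_t len, struct iov_iter *from)
{
	/* Now backed by __copy_from_user_inatomic_nocache() internally. */
	size_t copied = copy_from_iter_nocache(pmem_dst, len, from);

	if (copied < len)
		return -EFAULT;	/* part of the user source faulted */
	return copied;
}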