Commit de989ef0
Authored Jul 09, 2008 by Ingo Molnar
Merge branch 'x86/unify-lib' into x86/core
Parents: a737abd1, 22cac167
Showing 18 changed files with 574 additions and 1076 deletions (+574, -1076)
arch/x86/Kconfig.cpu                  +1    -1
arch/x86/ia32/ia32entry.S             +14   -11
arch/x86/kernel/asm-offsets_64.c      +1    -1
arch/x86/kernel/entry_64.S            +12   -11
arch/x86/kernel/tsc.c                 +1    -0
arch/x86/lib/Makefile                 +2    -2
arch/x86/lib/copy_user_64.S           +2    -2
arch/x86/lib/delay.c                  +8    -9
arch/x86/lib/delay_64.c               +0    -85
arch/x86/lib/getuser.S                +41   -46
arch/x86/lib/getuser_32.S             +0    -78
arch/x86/lib/putuser.S                +36   -37
arch/x86/lib/putuser_64.S             +0    -106
include/asm-x86/asm.h                 +8    -1
include/asm-x86/delay.h               +0    -4
include/asm-x86/uaccess.h             +448  -0
include/asm-x86/uaccess_32.h          +0    -422
include/asm-x86/uaccess_64.h          +0    -260
arch/x86/Kconfig.cpu

@@ -344,7 +344,7 @@ config X86_F00F_BUG
 config X86_WP_WORKS_OK
 	def_bool y
-	depends on X86_32 && !M386
+	depends on !M386

 config X86_INVLPG
 	def_bool y
arch/x86/ia32/ia32entry.S

@@ -116,7 +116,7 @@ ENTRY(ia32_sysenter_target)
 	pushfq
 	CFI_ADJUST_CFA_OFFSET 8
 	/*CFI_REL_OFFSET rflags,0*/
-	movl	8*3-THREAD_SIZE+threadinfo_sysenter_return(%rsp), %r10d
+	movl	8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
 	CFI_REGISTER rip,r10
 	pushq	$__USER32_CS
 	CFI_ADJUST_CFA_OFFSET 8

@@ -136,8 +136,9 @@ ENTRY(ia32_sysenter_target)
 	.quad 1b,ia32_badarg
 	.previous
 	GET_THREAD_INFO(%r10)
-	orl    $TS_COMPAT,threadinfo_status(%r10)
-	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
+	orl    $TS_COMPAT,TI_status(%r10)
+	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%r10)
 	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
 sysenter_do_call:

@@ -149,9 +150,9 @@ sysenter_do_call:
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
+	testl	$_TIF_ALLWORK_MASK,TI_flags(%r10)
 	jnz	int_ret_from_sys_call
-	andl	$~TS_COMPAT,threadinfo_status(%r10)
+	andl	$~TS_COMPAT,TI_status(%r10)
 	/* clear IF, that popfq doesn't enable interrupts early */
 	andl	$~0x200,EFLAGS-R11(%rsp)
 	movl	RIP-R11(%rsp),%edx		/* User %eip */

@@ -240,8 +241,9 @@ ENTRY(ia32_cstar_target)
 	.quad 1b,ia32_badarg
 	.previous
 	GET_THREAD_INFO(%r10)
-	orl   $TS_COMPAT,threadinfo_status(%r10)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
+	orl   $TS_COMPAT,TI_status(%r10)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%r10)
 	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
 cstar_do_call:

@@ -253,9 +255,9 @@ cstar_do_call:
 	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
+	testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
 	jnz  int_ret_from_sys_call
-	andl $~TS_COMPAT,threadinfo_status(%r10)
+	andl $~TS_COMPAT,TI_status(%r10)
 	RESTORE_ARGS 1,-ARG_SKIP,1,1,1
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx

@@ -333,8 +335,9 @@ ENTRY(ia32_syscall)
 	   this could be a problem. */
 	SAVE_ARGS 0,0,1
 	GET_THREAD_INFO(%r10)
-	orl   $TS_COMPAT,threadinfo_status(%r10)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
+	orl   $TS_COMPAT,TI_status(%r10)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%r10)
 	jnz ia32_tracesys
 ia32_do_syscall:
 	cmpl $(IA32_NR_syscalls-1),%eax
arch/x86/kernel/asm-offsets_64.c

@@ -34,7 +34,7 @@ int main(void)
 	ENTRY(pid);
 	BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(threadinfo_ ## entry, offsetof(struct thread_info, entry))
+#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
 	ENTRY(flags);
 	ENTRY(addr_limit);
 	ENTRY(preempt_count);
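The TI_* offsets used by the assembly throughout this commit are not hand-maintained constants; they are generated at build time by asm-offsets_64.c through the DEFINE()/offsetof() pattern renamed in the hunk above. A minimal, self-contained sketch of that pattern (plain C; the reduced thread_info and the direct printf stand in for the kernel's real asm-offsets plumbing, which captures the values from compiler output instead):

	/* Illustrative sketch: how asm-offsets-style TI_* constants come about.
	 * The kernel's real DEFINE() emits markers into assembler output that a
	 * script turns into "#define TI_flags <offset>"; here the #define lines
	 * are printed directly so the offsetof() idea is visible.
	 */
	#include <stdio.h>
	#include <stddef.h>

	struct thread_info {			/* reduced stand-in for the kernel struct */
		unsigned long	flags;
		unsigned long	addr_limit;
		int		preempt_count;
	};

	#define DEFINE(sym, val) printf("#define %s %lu\n", #sym, (unsigned long)(val))
	#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))

	int main(void)
	{
		ENTRY(flags);
		ENTRY(addr_limit);
		ENTRY(preempt_count);
		return 0;
	}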
arch/x86/kernel/entry_64.S

@@ -168,13 +168,13 @@ ENTRY(ret_from_fork)
 	CFI_ADJUST_CFA_OFFSET -4
 	call schedule_tail
 	GET_THREAD_INFO(%rcx)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jnz rff_trace
 rff_action:
 	RESTORE_REST
 	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
 	je   int_ret_from_sys_call
-	testl $_TIF_IA32,threadinfo_flags(%rcx)
+	testl $_TIF_IA32,TI_flags(%rcx)
 	jnz  int_ret_from_sys_call
 	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
 	jmp ret_from_sys_call

@@ -243,7 +243,8 @@ ENTRY(system_call_after_swapgs)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	GET_THREAD_INFO(%rcx)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
+		TI_flags(%rcx)
 	jnz tracesys
 	cmpq $__NR_syscall_max,%rax
 	ja badsys

@@ -262,7 +263,7 @@ sysret_check:
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	andl  %edi,%edx
 	jnz  sysret_careful
 	CFI_REMEMBER_STATE

@@ -347,10 +348,10 @@ int_ret_from_sys_call:
 int_with_check:
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	jnz   int_careful
-	andl    $~TS_COMPAT,threadinfo_status(%rcx)
+	andl    $~TS_COMPAT,TI_status(%rcx)
 	jmp   retint_swapgs
 	/* Either reschedule or signal or syscall exit tracking needed. */

@@ -558,7 +559,7 @@ retint_with_reschedule:
 	movl $_TIF_WORK_MASK,%edi
 retint_check:
 	LOCKDEP_SYS_EXIT_IRQ
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	CFI_REMEMBER_STATE
 	jnz  retint_careful

@@ -654,9 +655,9 @@ retint_signal:
 	/* Returning to kernel space. Check if we need preemption */
 	/* rcx:	 threadinfo. interrupts off. */
 ENTRY(retint_kernel)
-	cmpl $0,threadinfo_preempt_count(%rcx)
+	cmpl $0,TI_preempt_count(%rcx)
 	jnz  retint_restore_args
-	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
+	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
 	jnc  retint_restore_args
 	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc  retint_restore_args

@@ -819,7 +820,7 @@ paranoid_restore\trace:
 	jmp irq_return
 paranoid_userspace\trace:
 	GET_THREAD_INFO(%rcx)
-	movl threadinfo_flags(%rcx),%ebx
+	movl TI_flags(%rcx),%ebx
 	andl $_TIF_WORK_MASK,%ebx
 	jz paranoid_swapgs\trace
 	movq %rsp,%rdi			/* &pt_regs */

@@ -917,7 +918,7 @@ error_exit:
 	testl %eax,%eax
 	jne  retint_kernel
 	LOCKDEP_SYS_EXIT_IRQ
-	movl threadinfo_flags(%rcx),%edx
+	movl TI_flags(%rcx),%edx
 	movl $_TIF_WORK_MASK,%edi
 	andl %edi,%edx
 	jnz  retint_careful
arch/x86/kernel/tsc.c

@@ -513,6 +513,7 @@ void __init tsc_init(void)
 	 */
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(cpu_khz, cpu);
+	use_tsc_delay();
 	if (tsc_disabled > 0)
 		return;
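The use_tsc_delay() call added to tsc_init() switches the delay implementation that the now-shared arch/x86/lib/delay.c dispatches through a function pointer (delay_fn, visible in the delay.c hunks below). A minimal runnable model of that dispatch, with stand-in bodies rather than the kernel's real loop/TSC code:

	/* Model of the delay_fn function-pointer dispatch used by delay.c.
	 * The two implementations here only print; in the kernel they spin on
	 * a counted loop or on the TSC respectively.
	 */
	#include <stdio.h>

	static void delay_loop(unsigned long loops) { printf("loop delay: %lu\n", loops); }
	static void delay_tsc(unsigned long loops)  { printf("tsc delay:  %lu\n", loops); }

	static void (*delay_fn)(unsigned long) = delay_loop;

	static void use_tsc_delay(void) { delay_fn = delay_tsc; }
	static void __delay(unsigned long loops) { delay_fn(loops); }

	int main(void)
	{
		__delay(100);		/* before calibration: loop-based */
		use_tsc_delay();	/* what tsc_init() now does on both 32- and 64-bit */
		__delay(100);		/* afterwards: TSC-based */
		return 0;
	}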
arch/x86/lib/Makefile

@@ -4,8 +4,8 @@
 obj-$(CONFIG_SMP) := msr-on-cpu.o

-lib-y := delay_$(BITS).o
-lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
+lib-y := delay.o
+lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o

 ifeq ($(CONFIG_X86_32),y)
arch/x86/lib/copy_user_64.S

@@ -40,7 +40,7 @@ ENTRY(copy_to_user)
 	movq %rdi,%rcx
 	addq %rdx,%rcx
 	jc  bad_to_user
-	cmpq threadinfo_addr_limit(%rax),%rcx
+	cmpq TI_addr_limit(%rax),%rcx
 	jae bad_to_user
 	xorl %eax,%eax	/* clear zero flag */
 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string

@@ -65,7 +65,7 @@ ENTRY(copy_from_user)
 	movq %rsi,%rcx
 	addq %rdx,%rcx
 	jc  bad_from_user
-	cmpq threadinfo_addr_limit(%rax),%rcx
+	cmpq TI_addr_limit(%rax),%rcx
 	jae  bad_from_user
 	movl $1,%ecx	/* set zero flag */
 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
...
arch/x86/lib/delay
_32
.c
→
arch/x86/lib/delay.c
View file @
de989ef0
...
@@ -29,7 +29,7 @@
...
@@ -29,7 +29,7 @@
/* simple loop based delay: */
/* simple loop based delay: */
static
void
delay_loop
(
unsigned
long
loops
)
static
void
delay_loop
(
unsigned
long
loops
)
{
{
__asm__
__volatile__
(
asm
volatile
(
" test %0,%0
\n
"
" test %0,%0
\n
"
" jz 3f
\n
"
" jz 3f
\n
"
" jmp 1f
\n
"
" jmp 1f
\n
"
...
@@ -38,9 +38,9 @@ static void delay_loop(unsigned long loops)
...
@@ -38,9 +38,9 @@ static void delay_loop(unsigned long loops)
"1: jmp 2f
\n
"
"1: jmp 2f
\n
"
".align 16
\n
"
".align 16
\n
"
"2: dec
l
%0
\n
"
"2: dec %0
\n
"
" jnz 2b
\n
"
" jnz 2b
\n
"
"3: dec
l
%0
\n
"
"3: dec %0
\n
"
:
/* we don't need output */
:
/* we don't need output */
:
"a"
(
loops
)
:
"a"
(
loops
)
...
@@ -98,7 +98,7 @@ void use_tsc_delay(void)
...
@@ -98,7 +98,7 @@ void use_tsc_delay(void)
int
__devinit
read_current_timer
(
unsigned
long
*
timer_val
)
int
__devinit
read_current_timer
(
unsigned
long
*
timer_val
)
{
{
if
(
delay_fn
==
delay_tsc
)
{
if
(
delay_fn
==
delay_tsc
)
{
rdtscl
(
*
timer_val
);
rdtscl
l
(
*
timer_val
);
return
0
;
return
0
;
}
}
return
-
1
;
return
-
1
;
...
@@ -108,31 +108,30 @@ void __delay(unsigned long loops)
...
@@ -108,31 +108,30 @@ void __delay(unsigned long loops)
{
{
delay_fn
(
loops
);
delay_fn
(
loops
);
}
}
EXPORT_SYMBOL
(
__delay
);
inline
void
__const_udelay
(
unsigned
long
xloops
)
inline
void
__const_udelay
(
unsigned
long
xloops
)
{
{
int
d0
;
int
d0
;
xloops
*=
4
;
xloops
*=
4
;
__asm__
(
"mull %0
"
asm
(
"mull %%edx
"
:
"=d"
(
xloops
),
"=&a"
(
d0
)
:
"=d"
(
xloops
),
"=&a"
(
d0
)
:
"1"
(
xloops
),
"0"
:
"1"
(
xloops
),
"0"
(
cpu_data
(
raw_smp_processor_id
()).
loops_per_jiffy
*
(
HZ
/
4
)));
(
cpu_data
(
raw_smp_processor_id
()).
loops_per_jiffy
*
(
HZ
/
4
)));
__delay
(
++
xloops
);
__delay
(
++
xloops
);
}
}
EXPORT_SYMBOL
(
__const_udelay
);
void
__udelay
(
unsigned
long
usecs
)
void
__udelay
(
unsigned
long
usecs
)
{
{
__const_udelay
(
usecs
*
0x000010c7
);
/* 2**32 / 1000000 (rounded up) */
__const_udelay
(
usecs
*
0x000010c7
);
/* 2**32 / 1000000 (rounded up) */
}
}
EXPORT_SYMBOL
(
__udelay
);
void
__ndelay
(
unsigned
long
nsecs
)
void
__ndelay
(
unsigned
long
nsecs
)
{
{
__const_udelay
(
nsecs
*
0x00005
);
/* 2**32 / 1000000000 (rounded up) */
__const_udelay
(
nsecs
*
0x00005
);
/* 2**32 / 1000000000 (rounded up) */
}
}
EXPORT_SYMBOL
(
__delay
);
EXPORT_SYMBOL
(
__const_udelay
);
EXPORT_SYMBOL
(
__udelay
);
EXPORT_SYMBOL
(
__ndelay
);
EXPORT_SYMBOL
(
__ndelay
);
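The multipliers in __udelay()/__ndelay() above are fixed-point scale factors: 0x000010c7 is 2^32/1000000 rounded up, so multiplying by it and keeping the high 32 bits of the product (the mull / "=d" output in __const_udelay()) divides by one million without a runtime divide. A worked example with made-up calibration numbers (loops_per_jiffy and HZ below are illustrative, not real values):

	/* Worked example of the fixed-point scaling used by __udelay() above.
	 * 0x000010c7 == ceil(2^32 / 1e6); the >>32 plays the role of taking the
	 * high half of the mull result.
	 */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t lpj   = 4000000;	/* hypothetical loops_per_jiffy */
		uint64_t hz    = 250;		/* hypothetical CONFIG_HZ */
		uint64_t usecs = 100;

		uint64_t xloops = usecs * 0x000010c7ULL;	/* usecs * 2^32/1e6 */
		uint64_t loops  = (xloops * lpj * hz) >> 32;	/* high half of the product */

		/* direct computation for comparison: loops per second * seconds */
		uint64_t expect = lpj * hz * usecs / 1000000;

		printf("fixed-point: %llu loops, direct: %llu loops\n",
		       (unsigned long long)loops, (unsigned long long)expect);
		return 0;
	}

With these sample numbers the fixed-point path gives roughly 100,000 loops, slightly above the direct value because the constant is rounded up, so the delay can only err on the long side.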
arch/x86/lib/delay_64.c  (deleted, 100644 → 0)

/*
 *	Precise Delay Loops for x86-64
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/msr.h>

#ifdef CONFIG_SMP
#include <asm/smp.h>
#endif

int __devinit read_current_timer(unsigned long *timer_value)
{
	rdtscll(*timer_value);
	return 0;
}

void __delay(unsigned long loops)
{
	unsigned bclock, now;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	rdtscl(bclock);
	for (;;) {
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
	__delay(((xloops * HZ *
		cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
arch/x86/lib/getuser_64.S → arch/x86/lib/getuser.S

@@ -3,6 +3,7 @@
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
+ * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they

@@ -13,14 +14,13 @@
 /*
  * __get_user_X
  *
- * Inputs:	%rcx contains the address.
+ * Inputs:	%[r|e]ax contains the address.
  * The register is modified, but all changes are undone
  * before returning because the C code doesn't know about it.
  *
- * Outputs:	%rax is error code (0 or -EFAULT)
- *		%rdx contains zero-extended value
+ * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
+ *		%[r|e]dx contains zero-extended value
  *
- * %r8 is destroyed.
  *
  * These functions should not modify any other registers,
  * as they get called from within inline assembly.

@@ -32,78 +32,73 @@
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
+#include <asm/asm.h>

 	.text
 ENTRY(__get_user_1)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	cmpq threadinfo_addr_limit(%r8),%rcx
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
-1:	movzb (%rcx),%edx
-	xorl %eax,%eax
+1:	movzb (%_ASM_AX),%edx
+	xor %eax,%eax
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_1)

 ENTRY(__get_user_2)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $1,%rcx
-	jc 20f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 20f
-	decq %rcx
-2:	movzwl (%rcx),%edx
-	xorl %eax,%eax
+	add $1,%_ASM_AX
+	jc bad_get_user
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+	jae bad_get_user
+2:	movzwl -1(%_ASM_AX),%edx
+	xor %eax,%eax
 	ret
-20:	decq %rcx
-	jmp bad_get_user
 	CFI_ENDPROC
 ENDPROC(__get_user_2)

 ENTRY(__get_user_4)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $3,%rcx
-	jc 30f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 30f
-	subq $3,%rcx
-3:	movl (%rcx),%edx
-	xorl %eax,%eax
+	add $3,%_ASM_AX
+	jc bad_get_user
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+	jae bad_get_user
+3:	mov -3(%_ASM_AX),%edx
+	xor %eax,%eax
 	ret
-30:	subq $3,%rcx
-	jmp bad_get_user
 	CFI_ENDPROC
 ENDPROC(__get_user_4)

+#ifdef CONFIG_X86_64
 ENTRY(__get_user_8)
 	CFI_STARTPROC
-	GET_THREAD_INFO(%r8)
-	addq $7,%rcx
-	jc 40f
-	cmpq threadinfo_addr_limit(%r8),%rcx
-	jae 40f
-	subq $7,%rcx
-4:	movq (%rcx),%rdx
-	xorl %eax,%eax
+	add $7,%_ASM_AX
+	jc bad_get_user
+	GET_THREAD_INFO(%_ASM_DX)
+	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+	jae bad_get_user
+4:	movq -7(%_ASM_AX),%_ASM_DX
+	xor %eax,%eax
 	ret
-40:	subq $7,%rcx
-	jmp bad_get_user
 	CFI_ENDPROC
 ENDPROC(__get_user_8)
+#endif

 bad_get_user:
 	CFI_STARTPROC
-	xorl %edx,%edx
-	movq $(-EFAULT),%rax
+	xor %edx,%edx
+	mov $(-EFAULT),%_ASM_AX
 	ret
 	CFI_ENDPROC
 END(bad_get_user)

 .section __ex_table,"a"
-	.quad 1b,bad_get_user
-	.quad 2b,bad_get_user
-	.quad 3b,bad_get_user
-	.quad 4b,bad_get_user
-.previous
+	_ASM_PTR 1b,bad_get_user
+	_ASM_PTR 2b,bad_get_user
+	_ASM_PTR 3b,bad_get_user
+#ifdef CONFIG_X86_64
+	_ASM_PTR 4b,bad_get_user
+#endif
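__get_user_2/4/8 above validate the access by biasing the address with size-1: a carry out of the add means the range wraps around the top of the address space, and anything at or above TI_addr_limit is rejected (jae bad_get_user); the load then uses a negative displacement instead of undoing the bias. The same check in plain C, runnable, with an arbitrary example limit rather than a real addr_limit:

	/* Plain-C rendering of the access check in __get_user_2/4/8 above. */
	#include <stdio.h>
	#include <stdint.h>

	static int range_ok(uint64_t addr, uint64_t size, uint64_t addr_limit)
	{
		uint64_t end = addr + (size - 1);	/* add $(size-1),%_ASM_AX */

		if (end < addr)				/* carry set -> wrapped around */
			return 0;
		if (end >= addr_limit)			/* jae bad_get_user */
			return 0;
		return 1;
	}

	int main(void)
	{
		uint64_t limit = 0x00007ffffffff000ULL;	/* example limit, not the kernel's */

		printf("%d\n", range_ok(0x1000, 4, limit));		/* 1: fine */
		printf("%d\n", range_ok(limit - 2, 4, limit));		/* 0: crosses the limit */
		printf("%d\n", range_ok(~0ULL - 1, 4, limit));		/* 0: wraps */
		return 0;
	}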
arch/x86/lib/getuser_32.S  (deleted, 100644 → 0)

/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/thread_info.h>


/*
 * __get_user_X
 *
 * Inputs:	%eax contains the address
 *
 * Outputs:	%eax is error code (0 or -EFAULT)
 *		%edx contains zero-extended value
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */

.text
ENTRY(__get_user_1)
	CFI_STARTPROC
	GET_THREAD_INFO(%edx)
	cmpl TI_addr_limit(%edx),%eax
	jae bad_get_user
1:	movzbl (%eax),%edx
	xorl %eax,%eax
	ret
	CFI_ENDPROC
ENDPROC(__get_user_1)

ENTRY(__get_user_2)
	CFI_STARTPROC
	addl $1,%eax
	jc bad_get_user
	GET_THREAD_INFO(%edx)
	cmpl TI_addr_limit(%edx),%eax
	jae bad_get_user
2:	movzwl -1(%eax),%edx
	xorl %eax,%eax
	ret
	CFI_ENDPROC
ENDPROC(__get_user_2)

ENTRY(__get_user_4)
	CFI_STARTPROC
	addl $3,%eax
	jc bad_get_user
	GET_THREAD_INFO(%edx)
	cmpl TI_addr_limit(%edx),%eax
	jae bad_get_user
3:	movl -3(%eax),%edx
	xorl %eax,%eax
	ret
	CFI_ENDPROC
ENDPROC(__get_user_4)

bad_get_user:
	CFI_STARTPROC
	xorl %edx,%edx
	movl $-14,%eax
	ret
	CFI_ENDPROC
END(bad_get_user)

.section __ex_table,"a"
	.long 1b,bad_get_user
	.long 2b,bad_get_user
	.long 3b,bad_get_user
.previous
arch/x86/lib/putuser_32.S → arch/x86/lib/putuser.S

@@ -2,6 +2,8 @@
 * __put_user functions.
 *
 * (C) Copyright 2005 Linus Torvalds
+ * (C) Copyright 2005 Andi Kleen
+ * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they

@@ -11,6 +13,8 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/asm.h>

 /*

@@ -26,73 +30,68 @@
 */

 #define ENTER	CFI_STARTPROC ; \
-		pushl %ebx ; \
-		CFI_ADJUST_CFA_OFFSET 4 ; \
-		CFI_REL_OFFSET ebx, 0 ; \
-		GET_THREAD_INFO(%ebx)
-#define EXIT	popl %ebx ; \
-		CFI_ADJUST_CFA_OFFSET -4 ; \
-		CFI_RESTORE ebx ; \
-		ret ; \
+		GET_THREAD_INFO(%_ASM_BX)
+#define EXIT	ret ; \
 		CFI_ENDPROC

 .text
 ENTRY(__put_user_1)
 	ENTER
-	cmpl TI_addr_limit(%ebx),%ecx
+	cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
 	jae bad_put_user
-1:	movb %al,(%ecx)
-	xorl %eax,%eax
+1:	movb %al,(%_ASM_CX)
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)

 ENTRY(__put_user_2)
 	ENTER
-	movl TI_addr_limit(%ebx),%ebx
-	subl $1,%ebx
-	cmpl %ebx,%ecx
+	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+	sub $1,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
-2:	movw %ax,(%ecx)
-	xorl %eax,%eax
+2:	movw %ax,(%_ASM_CX)
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)

 ENTRY(__put_user_4)
 	ENTER
-	movl TI_addr_limit(%ebx),%ebx
-	subl $3,%ebx
-	cmpl %ebx,%ecx
+	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+	sub $3,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
-3:	movl %eax,(%ecx)
-	xorl %eax,%eax
+3:	movl %eax,(%_ASM_CX)
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)

 ENTRY(__put_user_8)
 	ENTER
-	movl TI_addr_limit(%ebx),%ebx
-	subl $7,%ebx
-	cmpl %ebx,%ecx
+	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+	sub $7,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
-4:	movl %eax,(%ecx)
-5:	movl %edx,4(%ecx)
-	xorl %eax,%eax
+4:	mov %_ASM_AX,(%_ASM_CX)
+#ifdef CONFIG_X86_32
+5:	movl %edx,4(%_ASM_CX)
+#endif
+	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_8)

 bad_put_user:
-	CFI_STARTPROC simple
-	CFI_DEF_CFA esp, 2*4
-	CFI_OFFSET eip, -1*4
-	CFI_OFFSET ebx, -2*4
-	movl $-14,%eax
+	CFI_STARTPROC
+	movl $-EFAULT,%eax
 	EXIT
 END(bad_put_user)

 .section __ex_table,"a"
-	.long 1b,bad_put_user
-	.long 2b,bad_put_user
-	.long 3b,bad_put_user
-	.long 4b,bad_put_user
-	.long 5b,bad_put_user
+	_ASM_PTR 1b,bad_put_user
+	_ASM_PTR 2b,bad_put_user
+	_ASM_PTR 3b,bad_put_user
+	_ASM_PTR 4b,bad_put_user
+#ifdef CONFIG_X86_32
+	_ASM_PTR 5b,bad_put_user
+#endif
 .previous
arch/x86/lib/putuser_64.S  (deleted, 100644 → 0)

/*
 * __put_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */

/*
 * __put_user_X
 *
 * Inputs:	%rcx contains the address
 *		%rdx contains new value
 *
 * Outputs:	%rax is error code (0 or -EFAULT)
 *
 * %r8 is destroyed.
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/page.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

	.text
ENTRY(__put_user_1)
	CFI_STARTPROC
	GET_THREAD_INFO(%r8)
	cmpq threadinfo_addr_limit(%r8),%rcx
	jae bad_put_user
1:	movb %dl,(%rcx)
	xorl %eax,%eax
	ret
	CFI_ENDPROC
ENDPROC(__put_user_1)

ENTRY(__put_user_2)
	CFI_STARTPROC
	GET_THREAD_INFO(%r8)
	addq $1,%rcx
	jc 20f
	cmpq threadinfo_addr_limit(%r8),%rcx
	jae 20f
	decq %rcx
2:	movw %dx,(%rcx)
	xorl %eax,%eax
	ret
20:	decq %rcx
	jmp bad_put_user
	CFI_ENDPROC
ENDPROC(__put_user_2)

ENTRY(__put_user_4)
	CFI_STARTPROC
	GET_THREAD_INFO(%r8)
	addq $3,%rcx
	jc 30f
	cmpq threadinfo_addr_limit(%r8),%rcx
	jae 30f
	subq $3,%rcx
3:	movl %edx,(%rcx)
	xorl %eax,%eax
	ret
30:	subq $3,%rcx
	jmp bad_put_user
	CFI_ENDPROC
ENDPROC(__put_user_4)

ENTRY(__put_user_8)
	CFI_STARTPROC
	GET_THREAD_INFO(%r8)
	addq $7,%rcx
	jc 40f
	cmpq threadinfo_addr_limit(%r8),%rcx
	jae 40f
	subq $7,%rcx
4:	movq %rdx,(%rcx)
	xorl %eax,%eax
	ret
40:	subq $7,%rcx
	jmp bad_put_user
	CFI_ENDPROC
ENDPROC(__put_user_8)

bad_put_user:
	CFI_STARTPROC
	movq $(-EFAULT),%rax
	ret
	CFI_ENDPROC
END(bad_put_user)

.section __ex_table,"a"
	.quad 1b,bad_put_user
	.quad 2b,bad_put_user
	.quad 3b,bad_put_user
	.quad 4b,bad_put_user
.previous
include/asm-x86/asm.h

@@ -3,8 +3,10 @@
 #ifdef __ASSEMBLY__
 # define __ASM_FORM(x)	x
+# define __ASM_EX_SEC	.section __ex_table
 #else
 # define __ASM_FORM(x)	" " #x " "
+# define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
 #endif

 #ifdef CONFIG_X86_32

@@ -14,6 +16,7 @@
 #endif

 #define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
+#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)

 #define _ASM_PTR	__ASM_SEL(.long, .quad)
 #define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)

@@ -24,10 +27,14 @@
 #define _ASM_ADD	__ASM_SIZE(add)
 #define _ASM_SUB	__ASM_SIZE(sub)
 #define _ASM_XADD	__ASM_SIZE(xadd)
+#define _ASM_AX		__ASM_REG(ax)
+#define _ASM_BX		__ASM_REG(bx)
+#define _ASM_CX		__ASM_REG(cx)
+#define _ASM_DX		__ASM_REG(dx)

 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
-	" .section __ex_table,\"a\"\n" \
+	__ASM_EX_SEC \
	_ASM_ALIGN "\n" \
	_ASM_PTR #from "," #to "\n" \
	" .previous\n"
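The __ASM_REG()/_ASM_AX..._ASM_DX macros added above are what let the unified getuser.S and putuser.S name one register and have it resolve to %eax or %rax depending on the configuration. A reduced, compile-and-run illustration of the selection (the __ASM_SEL here is simplified to a plain #ifdef and always produces strings; the real header also has an __ASSEMBLY__ branch that emits bare tokens):

	/* Reduced model of the __ASM_SEL/__ASM_REG selection added in asm.h.
	 * CONFIG_X86_32 picks the e-prefixed register name, otherwise the
	 * r-prefixed one; the result is printed so the expansion is visible.
	 */
	#include <stdio.h>

	#ifdef CONFIG_X86_32
	# define __ASM_SEL(a, b) #a
	#else
	# define __ASM_SEL(a, b) #b
	#endif

	#define __ASM_REG(reg)	__ASM_SEL(e##reg, r##reg)
	#define _ASM_AX		__ASM_REG(ax)
	#define _ASM_DX		__ASM_REG(dx)

	int main(void)
	{
		/* prints "cmp TI_addr_limit(%rdx),%rax" unless built with -DCONFIG_X86_32 */
		printf("cmp TI_addr_limit(%%%s),%%%s\n", _ASM_DX, _ASM_AX);
		return 0;
	}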
include/asm-x86/delay.h

@@ -26,10 +26,6 @@ extern void __delay(unsigned long loops);
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))

-#ifdef CONFIG_X86_32
 void use_tsc_delay(void);
-#else
-#define use_tsc_delay() {}
-#endif

 #endif /* _ASM_X86_DELAY_H */
include/asm-x86/uaccess.h (+448, -0): diff collapsed, not expanded in this view.

include/asm-x86/uaccess_32.h (+0, -422): diff collapsed, not expanded in this view.
include/asm-x86/uaccess_64.h

@@ -9,265 +9,6 @@ (the block below was removed)
 #include <linux/prefetch.h>
 #include <asm/page.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)
#define __addr_ok(addr) (!((unsigned long)(addr) & \
(current_thread_info()->addr_limit.seg)))
/*
* Uhhuh, this needs 65-bit arithmetic. We have a carry..
*/
#define __range_not_ok(addr, size) \
({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("# range_ok\n\r" \
"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
: "=&r" (flag), "=r" (roksum) \
: "1" (addr), "g" ((long)(size)), \
"g" (current_thread_info()->addr_limit.seg)); \
flag; \
})
#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
#define ARCH_HAS_SEARCH_EXTABLE
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
* This gets kind of ugly. We want to return _two_ values in "get_user()"
* and yet we don't want to do any pointers, because that is too much
* of a performance impact. Thus we have a few rather ugly macros here,
* and hide all the ugliness from the user.
*
* The "__xxx" versions of the user access functions are versions that
* do not verify the address space, that must have been done previously
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*/
#define __get_user_x(size, ret, x, ptr) \
asm volatile("call __get_user_" #size \
: "=a" (ret),"=d" (x) \
: "c" (ptr) \
: "r8")
/* Careful: we have to cast the result to the type of the pointer
* for sign reasons */
#define get_user(x, ptr) \
({ \
unsigned long __val_gu; \
int __ret_gu; \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_x(1, __ret_gu, __val_gu, ptr); \
break; \
case 2: \
__get_user_x(2, __ret_gu, __val_gu, ptr); \
break; \
case 4: \
__get_user_x(4, __ret_gu, __val_gu, ptr); \
break; \
case 8: \
__get_user_x(8, __ret_gu, __val_gu, ptr); \
break; \
default: \
__get_user_bad(); \
break; \
} \
(x) = (__force typeof(*(ptr)))__val_gu; \
__ret_gu; \
})
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);
#define __put_user_x(size, ret, x, ptr) \
asm volatile("call __put_user_" #size \
:"=a" (ret) \
:"c" (ptr),"d" (x) \
:"r8")
#define put_user(x, ptr) \
__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
__put_user_size((x), (ptr), (size), __pu_err); \
__pu_err; \
})
#define __put_user_check(x, ptr, size) \
({ \
int __pu_err; \
typeof(*(ptr)) __user *__pu_addr = (ptr); \
switch (size) { \
case 1: \
__put_user_x(1, __pu_err, x, __pu_addr); \
break; \
case 2: \
__put_user_x(2, __pu_err, x, __pu_addr); \
break; \
case 4: \
__put_user_x(4, __pu_err, x, __pu_addr); \
break; \
case 8: \
__put_user_x(8, __pu_err, x, __pu_addr); \
break; \
default: \
__put_user_bad(); \
} \
__pu_err; \
})
#define __put_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
break; \
case 2: \
__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
break; \
case 4: \
__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
break; \
case 8: \
__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
* Tell gcc we read from memory instead of writing: this is because
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
asm volatile("1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
".section .fixup, \"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
: ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
#define __get_user_nocheck(x, ptr, size) \
({ \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force typeof(*(ptr)))__gu_val; \
__gu_err; \
})
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
#define __get_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
break; \
case 2: \
__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
break; \
case 4: \
__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
break; \
case 8: \
__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
asm volatile("1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".section .fixup, \"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (err), ltype (x) \
: "m" (__m(addr)), "i"(errno), "0"(err))
/*
 * Copy To/From Userspace
 */

@@ -437,7 +178,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 	return copy_user_generic((__force void *)dst, src, size);
 }

-#define ARCH_HAS_NOCACHE_UACCESS 1
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
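The __range_not_ok() macro in the block removed above is the "65-bit arithmetic" its comment mentions: addq/sbbq folds the carry from addr+size into a flag word, and the cmpq/sbbq pair folds in the limit comparison, so the result is non-zero when the range wraps past 2^64 or runs past addr_limit. The same logic in plain C (the limit value is an arbitrary example, not the kernel's USER_DS):

	/* Plain-C rendering of the carry trick in __range_not_ok(). */
	#include <stdio.h>
	#include <stdint.h>

	static int range_not_ok(uint64_t addr, uint64_t size, uint64_t limit)
	{
		uint64_t sum = addr + size;
		int carry = (sum < addr);		/* addq %3,%1 ; sbbq %0,%0 */

		return carry || (sum > limit);		/* cmpq %1,%4 ; sbbq $0,%0 */
	}

	int main(void)
	{
		uint64_t limit = 0x00007ffffffff000ULL;	/* example addr_limit.seg */

		printf("%d\n", range_not_ok(0x400000, 4096, limit));	/* 0: ok   */
		printf("%d\n", range_not_ok(limit, 1, limit));		/* 1: past */
		printf("%d\n", range_not_ok(~0ULL, 16, limit));		/* 1: wrap */
		return 0;
	}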