Commit 708df1b0
Authored Aug 19, 2003 by Suresh B. Siddha
Committed by David Mosberger, Aug 19, 2003
[PATCH] ia64: cleanup inline assembly
Parent: cec5d408

Showing 52 changed files with 1377 additions and 900 deletions (+1377 / -900).
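The pattern is the same throughout the patch: an open-coded GNU asm statement is replaced by one of the compiler-neutral intrinsics introduced in the new <asm/gcc_intrin.h>, reached through <asm/intrinsics.h>, presumably so a non-GNU implementation can later be slotted in behind the same names. A minimal sketch of the before/after shape, modelled on the ar.itc read in include/asm-ia64/delay.h below; the wrapper function names here are illustrative only:

	#include <asm/intrinsics.h>	/* selects the gcc implementation in gcc_intrin.h */

	/* Before: works only with compilers that support GNU inline assembly. */
	static inline unsigned long read_itc_old (void)
	{
		unsigned long result;

		__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(result) :: "memory");
		return result;
	}

	/* After: the same read, through the register-number based intrinsic. */
	static inline unsigned long read_itc_new (void)
	{
		return ia64_getreg(_IA64_REG_AR_ITC);
	}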
arch/ia64/boot/Makefile			+1	-1
arch/ia64/boot/bootloader.c		+12	-8
arch/ia64/boot/fw-emu.S			+119	-0
arch/ia64/hp/sim/Makefile		+1	-1
arch/ia64/hp/sim/hpsim.S		+11	-0
arch/ia64/hp/sim/hpsim_setup.c		+0	-13
arch/ia64/ia32/ia32_signal.c		+60	-80
arch/ia64/ia32/ia32_support.c		+12	-20
arch/ia64/ia32/ia32_traps.c		+3	-3
arch/ia64/ia32/ia32priv.h		+13	-11
arch/ia64/ia32/sys_ia32.c		+3	-2
arch/ia64/kernel/entry.S		+12	-0
arch/ia64/kernel/fw-emu.c		+6	-112
arch/ia64/kernel/init_task.c		+1	-1
arch/ia64/kernel/iosapic.c		+4	-4
arch/ia64/kernel/irq_ia64.c		+7	-6
arch/ia64/kernel/mca.c			+12	-12
arch/ia64/kernel/perfmon.c		+28	-16
arch/ia64/kernel/setup.c		+5	-5
arch/ia64/kernel/signal.c		+4	-1
arch/ia64/kernel/traps.c		+16	-14
arch/ia64/kernel/unaligned.c		+29	-20
arch/ia64/mm/tlb.c			+4	-8
arch/ia64/sn/fakeprom/fw-emu.c		+6	-6
arch/ia64/sn/kernel/irq.c		+6	-6
arch/ia64/sn/kernel/setup.c		+1	-1
arch/ia64/sn/kernel/sn2/io.c		+31	-39
arch/ia64/vmlinux.lds.S			+3	-2
include/asm-ia64/atomic.h		+4	-4
include/asm-ia64/bitops.h		+4	-4
include/asm-ia64/byteorder.h		+2	-1
include/asm-ia64/current.h		+2	-1
include/asm-ia64/delay.h		+17	-13
include/asm-ia64/gcc_intrin.h		+633	-0
include/asm-ia64/ia64regs.h		+98	-0
include/asm-ia64/intrinsics.h		+49	-52
include/asm-ia64/io.h			+2	-1
include/asm-ia64/machvec.h		+1	-1
include/asm-ia64/mmu_context.h		+0	-2
include/asm-ia64/page.h			+2	-1
include/asm-ia64/pal.h			+2	-2
include/asm-ia64/processor.h		+80	-323
include/asm-ia64/rwsem.h		+7	-4
include/asm-ia64/sal.h			+4	-0
include/asm-ia64/smp.h			+1	-1
include/asm-ia64/sn/sn2/io.h		+23	-11
include/asm-ia64/sn/sn_cpuid.h		+3	-3
include/asm-ia64/spinlock.h		+5	-3
include/asm-ia64/system.h		+20	-20
include/asm-ia64/timex.h		+2	-1
include/asm-ia64/tlbflush.h		+2	-1
include/asm-ia64/unistd.h		+4	-59
arch/ia64/boot/Makefile

@@ -32,6 +32,6 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 LDFLAGS_bootloader = -static -T

-$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o \
+$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/fw-emu.o \
 		lib/lib.a arch/ia64/lib/lib.a FORCE
	$(call if_changed,ld)
arch/ia64/boot/bootloader.c

@@ -21,6 +21,7 @@ struct task_struct;	/* forward declaration for elf.h */
 #include <asm/pgtable.h>
 #include <asm/sal.h>
 #include <asm/system.h>
+#include <asm/intrinsics.h>

 /* Simulator system calls: */

@@ -54,9 +55,9 @@ struct disk_stat {
 };

 #include "../kernel/fw-emu.c"

+extern void jmp_to_kernel (ulong sp, ulong bp, ulong e_entry);
+extern void __bsw1 (void);
-/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
-asm (".global printk; printk = 0");

 /*
  * Set a break point on this function so that symbols are available to set breakpoints in

@@ -98,9 +99,12 @@ _start (void)
 	char *kpath, *args;
 	long arglen = 0;

-	asm volatile ("movl gp=__gp;;" ::: "memory");
-	asm volatile ("mov sp=%0" :: "r"(stack) : "memory");
-	asm volatile ("bsw.1;;");
+	extern __u64 __gp;
+	register unsigned long tmp = (unsigned long) &stack[0];
+
+	ia64_setreg(_IA64_REG_GP, __gp);
+	ia64_setreg(_IA64_REG_SP, tmp);
+	__bsw1();

 	ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);

@@ -195,15 +199,15 @@ _start (void)
 	cons_write("starting kernel...\n");

 	/* fake an I/O base address: */
-	asm volatile ("mov ar.k0=%0" :: "r"(0xffffc000000UL));
+	ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL);

 	bp = sys_fw_init(args, arglen);
 	ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS);
 	debug_break();
-	asm volatile ("mov sp=%2; mov r28=%1; br.sptk.few %0"
-		      :: "b"(e_entry), "r"(bp), "r"(__pa(&stack)));
+	tmp = __pa(&stack);
+	jmp_to_kernel(tmp, (unsigned long) bp, e_entry);

 	cons_write("kernel returned!\n");
 	ssc(-1, 0, 0, 0, SSC_EXIT);
arch/ia64/boot/fw-emu.S (new file, mode 100644)

#include <asm/asmmacro.h>

GLOBAL_ENTRY(ssc)
	.regstk 5,0,0,0
	mov r15=in4
	break 0x80001
	br.ret.sptk.many b0
END(ssc)

GLOBAL_ENTRY(pal_emulator_static)
	mov r8=-1
	mov r9=256
	;;
	cmp.gtu p6,p7=r9,r28		/* r28 <= 255? */
(p6)	br.cond.sptk.few static
	;;
	mov r9=512
	;;
	cmp.gtu p6,p7=r9,r28
(p6)	br.cond.sptk.few stacked
	;;
static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
(p7)	br.cond.sptk.few 1f
	;;
	mov r8=0			/* status = 0 */
	movl r9=0x100000000		/* tc.base */
	movl r10=0x0000000200000003	/* count[0], count[1] */
	movl r11=0x1000000000002000	/* stride[0], stride[1] */
	br.cond.sptk.few rp
1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
(p7)	br.cond.sptk.few 1f
	mov r8=0			/* status = 0 */
	movl r9=0x100000064		/* proc_ratio (1/100) */
	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */
	;;
1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */
(p7)	br.cond.sptk.few 1f
	mov r8=0			/* status = 0 */
	mov r9=96			/* num phys stacked */
	mov r10=0			/* hints */
	mov r11=0
	br.cond.sptk.few rp
1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */
(p7)	br.cond.sptk.few 1f
	mov r9=ar.lc
	movl r8=524288			/* flush 512k million cache lines (16MB) */
	;;
	mov ar.lc=r8
	movl r8=0xe000000000000000
	;;
.loop:	fc r8
	add r8=32,r8
	br.cloop.sptk.few .loop
	sync.i
	;;
	srlz.i
	;;
	mov ar.lc=r9
	mov r8=r0
	;;
1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */
(p7)	br.cond.sptk.few 1f
	mov r8=0			/* status = 0 */
	movl r9=0x12082004		/* generic=4 width=32 retired=8 cycles=18 */
	mov r10=0			/* reserved */
	mov r11=0			/* reserved */
	mov r16=0xffff			/* implemented PMC */
	mov r17=0xffff			/* implemented PMD */
	add r18=8,r29			/* second index */
	;;
	st8 [r29]=r16,16		/* store implemented PMC */
	st8 [r18]=r0,16			/* clear remaining bits */
	;;
	st8 [r29]=r0,16			/* store implemented PMC */
	st8 [r18]=r0,16			/* clear remaining bits */
	;;
	st8 [r29]=r17,16		/* store implemented PMD */
	st8 [r18]=r0,16			/* clear remaining bits */
	mov r16=0xf0			/* cycles count capable PMC */
	;;
	st8 [r29]=r0,16			/* store implemented PMC */
	st8 [r18]=r0,16			/* clear remaining bits */
	mov r17=0x10			/* retired bundles capable PMC */
	;;
	st8 [r29]=r16,16		/* store cycles capable */
	st8 [r18]=r0,16			/* clear remaining bits */
	;;
	st8 [r29]=r0,16			/* store implemented PMC */
	st8 [r18]=r0,16			/* clear remaining bits */
	;;
	st8 [r29]=r17,16		/* store retired bundle capable */
	st8 [r18]=r0,16			/* clear remaining bits */
	;;
	st8 [r29]=r0,16			/* store implemented PMC */
	st8 [r18]=r0,16			/* clear remaining bits */
	;;
1:	br.cond.sptk.few rp
stacked:
	br.ret.sptk.few rp
END(pal_emulator_static)

GLOBAL_ENTRY(jmp_to_kernel)
	.regstk 3,0,0,0
	mov sp=in0
	mov r28=in1
	mov b7=in2
	br.sptk.few b7
END(jmp_to_kernel)

GLOBAL_ENTRY(__bsw1)
	bsw.1
	;;
	br.ret.sptk.many b0
END(__bsw1)

/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
	.global printk; printk = 0
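The ssc stub above is the assembly twin of the static C function deleted from arch/ia64/kernel/fw-emu.c further down: .regstk 5,0,0,0 names the five incoming arguments in0..in4, the request number lands in r15, and break 0x80001 traps into the simulator, which returns its result in r8. A hedged usage sketch (the prototype is the one this patch adds; the calling function is illustrative only):

	extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);

	/* Illustrative only: issue a console-init request to the Ski simulator.
	 * The fifth argument (nr) is what the stub moves into r15. */
	static void console_init_sketch (void)
	{
		ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
	}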
arch/ia64/hp/sim/Makefile

@@ -7,7 +7,7 @@
 # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
 #

-obj-y := hpsim_irq.o hpsim_setup.o
+obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
 obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o

 obj-$(CONFIG_HP_SIMETH) += simeth.o
arch/ia64/hp/sim/hpsim.S (new file, mode 100644)

#include <asm/asmmacro.h>

/*
 * Simulator system call.
 */
GLOBAL_ENTRY(ia64_ssc)
	mov r15=r36
	break 0x80001
	br.ret.sptk.many rp
END(ia64_ssc)
arch/ia64/hp/sim/hpsim_setup.c

@@ -25,19 +25,6 @@
 #include "hpsim_ssc.h"

-/*
- * Simulator system call.
- */
-asm (".text\n"
-     ".align 32\n"
-     ".global ia64_ssc\n"
-     ".proc ia64_ssc\n"
-     "ia64_ssc:\n"
-     "mov r15=r36\n"
-     "break 0x80001\n"
-     "br.ret.sptk.many rp\n"
-     ".endp\n");

 void
 ia64_ssc_connect_irq (long intr, long irq)
 {
arch/ia64/ia32/ia32_signal.c

@@ -41,6 +41,8 @@
 #define __IA32_NR_sigreturn 119
 #define __IA32_NR_rt_sigreturn 173

+#include <asm/intrinsics.h>
+#ifdef ASM_SUPPORTED
 register double f16 asm ("f16");	register double f17 asm ("f17");
 register double f18 asm ("f18");	register double f19 asm ("f19");
 register double f20 asm ("f20");	register double f21 asm ("f21");

@@ -50,6 +52,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26");	register double f27 asm ("f27");
 register double f28 asm ("f28");	register double f29 asm ("f29");
 register double f30 asm ("f30");	register double f31 asm ("f31");
+#endif

 struct sigframe_ia32
 {

@@ -198,30 +201,6 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
  * All other fields unused...
  */

-#define __ldfe(regnum, x)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("ldfe %0=[%1] ;;" :"=f"(__f__): "r"(x));	\
-})
-
-#define __ldf8(regnum, x)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("ldf8 %0=[%1] ;;" :"=f"(__f__): "r"(x));	\
-})
-
-#define __stfe(x, regnum)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
-})
-
-#define __stf8(x, regnum)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
-})
-
 static int
 save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 {

@@ -239,17 +218,18 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 		return -EFAULT;

 	/* Readin fsr, fcr, fir, fdr and copy onto fpstate */
-	asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
-	asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
-	asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
-	asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+	fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	fir = ia64_getreg(_IA64_REG_AR_FIR);
+	fdr = ia64_getreg(_IA64_REG_AR_FDR);
 	/*
 	 * We need to clear the exception state before calling the signal handler. Clear
 	 * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex
 	 * instruction.
 	 */
 	new_fsr = fsr & ~0x80ff;
-	asm volatile ( "mov ar.fsr=%0;" :: "r"(new_fsr));
+	ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
 	__put_user(fcr & 0xffff, &save->cw);
 	__put_user(fsr & 0xffff, &save->sw);

@@ -286,45 +266,45 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	ia64f2ia32f(fpregp, &ptp->f11);
 	copy_to_user(&save->_st[(3+fr8_st_map) & 0x7], fpregp, sizeof(struct _fpreg_ia32));

-	__stfe(fpregp, 12);
+	ia64_stfe(fpregp, 12);
 	copy_to_user(&save->_st[(4+fr8_st_map) & 0x7], fpregp, sizeof(struct _fpreg_ia32));
-	__stfe(fpregp, 13);
+	ia64_stfe(fpregp, 13);
 	copy_to_user(&save->_st[(5+fr8_st_map) & 0x7], fpregp, sizeof(struct _fpreg_ia32));
-	__stfe(fpregp, 14);
+	ia64_stfe(fpregp, 14);
 	copy_to_user(&save->_st[(6+fr8_st_map) & 0x7], fpregp, sizeof(struct _fpreg_ia32));
-	__stfe(fpregp, 15);
+	ia64_stfe(fpregp, 15);
 	copy_to_user(&save->_st[(7+fr8_st_map) & 0x7], fpregp, sizeof(struct _fpreg_ia32));

-	__stf8(&num128[0], 16);
-	__stf8(&num128[1], 17);
+	ia64_stf8(&num128[0], 16);
+	ia64_stf8(&num128[1], 17);
 	copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 18);
-	__stf8(&num128[1], 19);
+	ia64_stf8(&num128[0], 18);
+	ia64_stf8(&num128[1], 19);
 	copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 20);
-	__stf8(&num128[1], 21);
+	ia64_stf8(&num128[0], 20);
+	ia64_stf8(&num128[1], 21);
 	copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 22);
-	__stf8(&num128[1], 23);
+	ia64_stf8(&num128[0], 22);
+	ia64_stf8(&num128[1], 23);
 	copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 24);
-	__stf8(&num128[1], 25);
+	ia64_stf8(&num128[0], 24);
+	ia64_stf8(&num128[1], 25);
 	copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 26);
-	__stf8(&num128[1], 27);
+	ia64_stf8(&num128[0], 26);
+	ia64_stf8(&num128[1], 27);
 	copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 28);
-	__stf8(&num128[1], 29);
+	ia64_stf8(&num128[0], 28);
+	ia64_stf8(&num128[1], 29);
 	copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 30);
-	__stf8(&num128[1], 31);
+	ia64_stf8(&num128[0], 30);
+	ia64_stf8(&num128[1], 31);
 	copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
 	return 0;
 }

@@ -354,10 +334,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	 * should remain same while writing.
 	 * So, we do a read, change specific fields and write.
 	 */
-	asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
-	asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
-	asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
-	asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+	fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	fir = ia64_getreg(_IA64_REG_AR_FIR);
+	fdr = ia64_getreg(_IA64_REG_AR_FDR);

 	__get_user(mxcsr, (unsigned int *)&save->mxcsr);
 	/* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */

@@ -391,10 +371,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	num64 = (num64 << 32) | lo;
 	fdr = (fdr & (~0xffffffffffff)) | num64;

-	asm volatile ( "mov ar.fsr=%0;" :: "r"(fsr));
-	asm volatile ( "mov ar.fcr=%0;" :: "r"(fcr));
-	asm volatile ( "mov ar.fir=%0;" :: "r"(fir));
-	asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
+	ia64_setreg(_IA64_REG_AR_FSR, fsr);
+	ia64_setreg(_IA64_REG_AR_FCR, fcr);
+	ia64_setreg(_IA64_REG_AR_FIR, fir);
+	ia64_setreg(_IA64_REG_AR_FDR, fdr);

 	/*
	 * restore f8..f11 onto pt_regs

@@ -420,45 +400,45 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	ia32f2ia64f(&ptp->f11, fpregp);

 	copy_from_user(fpregp, &save->_st[(4+fr8_st_map) & 0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(12, fpregp);
+	ia64_ldfe(12, fpregp);
 	copy_from_user(fpregp, &save->_st[(5+fr8_st_map) & 0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(13, fpregp);
+	ia64_ldfe(13, fpregp);
 	copy_from_user(fpregp, &save->_st[(6+fr8_st_map) & 0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(14, fpregp);
+	ia64_ldfe(14, fpregp);
 	copy_from_user(fpregp, &save->_st[(7+fr8_st_map) & 0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(15, fpregp);
+	ia64_ldfe(15, fpregp);

 	copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
-	__ldf8(16, &num128[0]);
-	__ldf8(17, &num128[1]);
+	ia64_ldf8(16, &num128[0]);
+	ia64_ldf8(17, &num128[1]);

 	copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
-	__ldf8(18, &num128[0]);
-	__ldf8(19, &num128[1]);
+	ia64_ldf8(18, &num128[0]);
+	ia64_ldf8(19, &num128[1]);

 	copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
-	__ldf8(20, &num128[0]);
-	__ldf8(21, &num128[1]);
+	ia64_ldf8(20, &num128[0]);
+	ia64_ldf8(21, &num128[1]);

 	copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
-	__ldf8(22, &num128[0]);
-	__ldf8(23, &num128[1]);
+	ia64_ldf8(22, &num128[0]);
+	ia64_ldf8(23, &num128[1]);

 	copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
-	__ldf8(24, &num128[0]);
-	__ldf8(25, &num128[1]);
+	ia64_ldf8(24, &num128[0]);
+	ia64_ldf8(25, &num128[1]);

 	copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
-	__ldf8(26, &num128[0]);
-	__ldf8(27, &num128[1]);
+	ia64_ldf8(26, &num128[0]);
+	ia64_ldf8(27, &num128[1]);

 	copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
-	__ldf8(28, &num128[0]);
-	__ldf8(29, &num128[1]);
+	ia64_ldf8(28, &num128[0]);
+	ia64_ldf8(29, &num128[1]);

 	copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
-	__ldf8(30, &num128[0]);
-	__ldf8(31, &num128[1]);
+	ia64_ldf8(30, &num128[0]);
+	ia64_ldf8(31, &num128[1]);
 	return 0;
 }

@@ -705,7 +685,7 @@ setup_sigcontext_ia32 (struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate
 	/*
 	 *  `eflags' is in an ar register for this context
 	 */
-	asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+	flag = ia64_getreg(_IA64_REG_AR_EFLAG);
 	err |= __put_user((unsigned int)flag, &sc->eflags);
 	err |= __put_user(regs->r12, &sc->esp_at_signal);
 	err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);

@@ -790,10 +770,10 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
 		 * IA32 process's context.
 		 */
 		err |= __get_user(tmpflags, &sc->eflags);
-		asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+		flag = ia64_getreg(_IA64_REG_AR_EFLAG);
 		flag &= ~0x40DD5;
 		flag |= (tmpflags & 0x40DD5);
-		asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
+		ia64_setreg(_IA64_REG_AR_EFLAG, flag);

 		regs->r1 = -1;	/* disable syscall checks, r1 is orig_eax */
 	}
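The new #ifdef ASM_SUPPORTED guard is worth noting: gcc_intrin.h defines ASM_SUPPORTED, so GNU-only constructs such as global register variables stay visible to gcc but drop out cleanly for a compiler that only has the intrinsics. A minimal sketch of the convention, using only names from this patch:

	#include <asm/intrinsics.h>	/* defines ASM_SUPPORTED when gcc_intrin.h is used */

	#ifdef ASM_SUPPORTED
	/* Global register variables are a GNU extension: pin f16 so the
	 * compiler treats it as live across the frame save/restore code. */
	register double f16 asm ("f16");
	#endif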
arch/ia64/ia32/ia32_support.c

@@ -22,6 +22,7 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
+#include <asm/intrinsics.h>

 #include "ia32priv.h"

@@ -68,19 +69,11 @@ ia32_load_segment_descriptors (struct task_struct *task)
 void
 ia32_save_state (struct task_struct *t)
 {
-	unsigned long eflag, fsr, fcr, fir, fdr;
-
-	asm ("mov %0=ar.eflag;"
-	     "mov %1=ar.fsr;"
-	     "mov %2=ar.fcr;"
-	     "mov %3=ar.fir;"
-	     "mov %4=ar.fdr;"
-	     : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
-	t->thread.eflag = eflag;
-	t->thread.fsr = fsr;
-	t->thread.fcr = fcr;
-	t->thread.fir = fir;
-	t->thread.fdr = fdr;
+	t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
+	t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
+	t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
 	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
 	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
 }

@@ -99,12 +92,11 @@ ia32_load_state (struct task_struct *t)
 	fdr = t->thread.fdr;
 	tssd = load_desc(_TSS(nr));	/* TSSD */

-	asm volatile ("mov ar.eflag=%0;"
-		      "mov ar.fsr=%1;"
-		      "mov ar.fcr=%2;"
-		      "mov ar.fir=%3;"
-		      "mov ar.fdr=%4;"
-		      :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
+	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
+	ia64_setreg(_IA64_REG_AR_FSR, fsr);
+	ia64_setreg(_IA64_REG_AR_FCR, fcr);
+	ia64_setreg(_IA64_REG_AR_FIR, fir);
+	ia64_setreg(_IA64_REG_AR_FDR, fdr);
 	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
 	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
 	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);

@@ -178,7 +170,7 @@ void
 ia32_cpu_init (void)
 {
	/* initialize global ia32 state - CR0 and CR4 */
-	asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+	ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
 }

 static int __init
arch/ia64/ia32/ia32_traps.c

@@ -15,6 +15,7 @@
 #include "ia32priv.h"

 #include <asm/ptrace.h>
+#include <asm/intrinsics.h>

 int
 ia32_intercept (struct pt_regs *regs, unsigned long isr)

@@ -93,9 +94,8 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
 	      {
 		unsigned long fsr, fcr;

-		asm ("mov %0=ar.fsr;"
-		     "mov %1=ar.fcr;"
-		     : "=r"(fsr), "=r"(fcr));
+		fsr = ia64_getreg(_IA64_REG_AR_FSR);
+		fcr = ia64_getreg(_IA64_REG_AR_FCR);

 		siginfo.si_signo = SIGFPE;
 		/*
arch/ia64/ia32/ia32priv.h

@@ -445,17 +445,19 @@ extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
 extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
 extern void ia32_load_segment_descriptors (struct task_struct *task);

-#define ia32f2ia64f(dst,src) \
-	do { \
-	register double f6 asm ("f6"); \
-	asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
-	} while(0)
-
-#define ia64f2ia32f(dst,src) \
-	do { \
-	register double f6 asm ("f6"); \
-	asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
-	} while(0)
+#define ia32f2ia64f(dst,src)		\
+	do {				\
+		ia64_ldfe(6,src);	\
+		ia64_stop();		\
+		ia64_stf_spill(dst, 6);	\
+	} while(0)
+
+#define ia64f2ia32f(dst,src)		\
+	do {				\
+		ia64_ldf_fill(6, src);	\
+		ia64_stop();		\
+		ia64_stfe(dst, 6);	\
+	} while(0)

 struct user_regs_struct32 {
	__u32 ebx, ecx, edx, esi, edi, ebp, eax;
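Note how the explicit ";;" inside the old asm templates becomes a call to ia64_stop(), which gcc_intrin.h (quoted at the end of this page) defines as a bare ";;" asm: the stop bit closes the instruction group, so the store cannot be bundled ahead of the load that produces f6. A hedged usage sketch of the rewritten macro; the helper function and variable names are illustrative only:

	#include "ia32priv.h"	/* for ia32f2ia64f() and struct _fpreg_ia32 */

	/* Illustrative helper: convert one saved x87 register image (src) into
	 * the ia64 spill format (dst) via the rewritten macro. */
	static void convert_st0 (struct ia64_fpreg *dst, struct _fpreg_ia32 *src)
	{
		/* expands to: ia64_ldfe(6, src); ia64_stop(); ia64_stf_spill(dst, 6); */
		ia32f2ia64f(dst, src);
	}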
arch/ia64/ia32/sys_ia32.c

@@ -54,6 +54,7 @@
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
+#include <asm/intrinsics.h>

 #include "ia32priv.h"

@@ -2192,7 +2193,7 @@ sys32_iopl (int level)
 	if (level != 3)
 		return(-EINVAL);
	/* Trying to gain more privileges? */
-	asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
+	old = ia64_getreg(_IA64_REG_AR_EFLAG);
 	if ((unsigned int) level > ((old >> 12) & 3)) {
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;

@@ -2216,7 +2217,7 @@ sys32_iopl (int level)
 	if (addr >= 0) {
 		old = (old & ~0x3000) | (level << 12);
-		asm volatile ("mov ar.eflag=%0;;" :: "r"(old));
+		ia64_setreg(_IA64_REG_AR_EFLAG, old);
 	}
 	fput(file);
arch/ia64/kernel/entry.S

@@ -471,6 +471,18 @@ GLOBAL_ENTRY(__ia64_syscall)
 	br.ret.sptk.many rp
 END(__ia64_syscall)

+GLOBAL_ENTRY(execve)
+	mov r15=__NR_execve			// put syscall number in place
+	break __BREAK_SYSCALL
+	br.ret.sptk.many rp
+END(execve)
+
+GLOBAL_ENTRY(clone)
+	mov r15=__NR_clone			// put syscall number in place
+	break __BREAK_SYSCALL
+	br.ret.sptk.many rp
+END(clone)
+
	/*
	 * We invoke syscall_trace through this intermediate function to
	 * ensure that the syscall input arguments are not clobbered.  We
arch/ia64/kernel/fw-emu.c

@@ -46,17 +46,7 @@ static char fw_mem[(  sizeof(struct ia64_boot_param)
 /*
  * Simulator system call.
  */
-static long
-ssc (long arg0, long arg1, long arg2, long arg3, int nr)
-{
-	register long r8 asm ("r8");
-
-	asm volatile ("mov r15=%1\n\t"
-		      "break 0x80001"
-		      : "=r"(r8)
-		      : "r"(nr), "r"(arg0), "r"(arg1), "r"(arg2), "r"(arg3));
-	return r8;
-}
+extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);

 #define SECS_PER_HOUR	(60 * 60)
 #define SECS_PER_DAY	(SECS_PER_HOUR * 24)

@@ -127,101 +117,6 @@ offtime (unsigned long t, efi_time_t *tp)
  */
 extern void pal_emulator_static (void);

-asm ("	.proc pal_emulator_static\n"
-"pal_emulator_static:"
-"	mov r8=-1\n"
-"	mov r9=256\n"
-"	;;\n"
-"	cmp.gtu p6,p7=r9,r28		/* r28 <= 255? */\n"
-"(p6)	br.cond.sptk.few static\n"
-"	;;\n"
-"	mov r9=512\n"
-"	;;\n"
-"	cmp.gtu p6,p7=r9,r28\n"
-"(p6)	br.cond.sptk.few stacked\n"
-"	;;\n"
-"static:	cmp.eq p6,p7=6,r28	/* PAL_PTCE_INFO */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	;;\n"
-"	mov r8=0			/* status = 0 */\n"
-"	movl r9=0x100000000		/* tc.base */\n"
-"	movl r10=0x0000000200000003	/* count[0], count[1] */\n"
-"	movl r11=0x1000000000002000	/* stride[0], stride[1] */\n"
-"	br.cond.sptk.few rp\n"
-"1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r8=0			/* status = 0 */\n"
-"	movl r9 =0x100000064		/* proc_ratio (1/100) */\n"
-"	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */\n"
-"	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */\n"
-"	;;\n"
-"1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r8=0			/* status = 0 */\n"
-"	mov r9=96			/* num phys stacked */\n"
-"	mov r10=0			/* hints */\n"
-"	mov r11=0\n"
-"	br.cond.sptk.few rp\n"
-"1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r9=ar.lc\n"
-"	movl r8=524288			/* flush 512k million cache lines (16MB) */\n"
-"	;;\n"
-"	mov ar.lc=r8\n"
-"	movl r8=0xe000000000000000\n"
-"	;;\n"
-".loop:	fc r8\n"
-"	add r8=32,r8\n"
-"	br.cloop.sptk.few .loop\n"
-"	sync.i\n"
-"	;;\n"
-"	srlz.i\n"
-"	;;\n"
-"	mov ar.lc=r9\n"
-"	mov r8=r0\n"
-"	;;\n"
-"1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r8=0			/* status = 0 */\n"
-"	movl r9 =0x12082004		/* generic=4 width=32 retired=8 cycles=18 */\n"
-"	mov r10=0			/* reserved */\n"
-"	mov r11=0			/* reserved */\n"
-"	mov r16=0xffff			/* implemented PMC */\n"
-"	mov r17=0xffff			/* implemented PMD */\n"
-"	add r18=8,r29			/* second index */\n"
-"	;;\n"
-"	st8 [r29]=r16,16		/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r17,16		/* store implemented PMD */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	mov r16=0xf0			/* cycles count capable PMC */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	mov r17=0x10			/* retired bundles capable PMC */\n"
-"	;;\n"
-"	st8 [r29]=r16,16		/* store cycles capable */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r17,16		/* store retired bundle capable */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"1:	br.cond.sptk.few rp\n"
-"stacked:\n"
-"	br.ret.sptk.few rp\n"
-"	.endp pal_emulator_static\n");

 /* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
 #define BUILD_CMD(addr)		((0x80000000 | (addr)) & ~3)

@@ -268,14 +163,14 @@ efi_unimplemented (void)
 	return EFI_UNSUPPORTED;
 }

-static long
+static struct sal_ret_values
 sal_emulator (long index, unsigned long in1, unsigned long in2,
	      unsigned long in3, unsigned long in4, unsigned long in5,
	      unsigned long in6, unsigned long in7)
 {
-	register long r9 asm ("r9") = 0;
-	register long r10 asm ("r10") = 0;
-	register long r11 asm ("r11") = 0;
+	long r9  = 0;
+	long r10 = 0;
+	long r11 = 0;
	long status;

	/*

@@ -357,8 +252,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
	} else {
		status = -1;
	}

-	asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
-	return status;
+	return ((struct sal_ret_values) {status, r9, r10, r11});
 }
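Returning struct sal_ret_values replaces a trick that only worked by accident: binding r9..r11 as register asm variables and hoping the compiler left them alive at the return. On ia64 a small aggregate is returned in r8..r11, which is exactly the SAL calling convention, so the struct return expresses the same contract portably. A hedged sketch; the struct presumably corresponds to the four lines added to include/asm-ia64/sal.h in this patch, and the field names and stub body here are illustrative only:

	/* Illustrative layout: four long return slots, mapping onto r8..r11. */
	struct sal_ret_values {
		long r8, r9, r10, r11;
	};

	static struct sal_ret_values
	sal_emulator_sketch (void)
	{
		long status = 0, r9 = 0, r10 = 0, r11 = 0;

		/* ... emulate the requested SAL call, filling in the values ... */
		return ((struct sal_ret_values) {status, r9, r10, r11});
	}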
arch/ia64/kernel/init_task.c

@@ -39,4 +39,4 @@ static union {
	.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
 }};

-asm (".global init_task; init_task = init_task_mem");
+extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
arch/ia64/kernel/iosapic.c

@@ -497,7 +497,7 @@ iosapic_register_intr (unsigned int gsi,
		       unsigned long polarity, unsigned long trigger)
 {
	int vector;
-	unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+	unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;

	vector = gsi_to_vector(gsi);
	if (vector < 0)

@@ -574,7 +574,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
			  unsigned long trigger)
 {
	int vector;
-	unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+	unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;

	vector = isa_irq_to_vector(isa_irq);

@@ -668,11 +668,11 @@ iosapic_enable_intr (unsigned int vector)
		 * Direct the interrupt vector to the current cpu, platform redirection
		 * will distribute them.
		 */
-		dest = (ia64_get_lid() >> 16) & 0xffff;
+		dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
	}
 #else
	/* direct the interrupt vector to the running cpu id */
-	dest = (ia64_get_lid() >> 16) & 0xffff;
+	dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 #endif
	set_rte(vector, dest);
arch/ia64/kernel/irq_ia64.c

@@ -35,6 +35,7 @@
 #include <asm/machvec.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/intrinsics.h>

 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>

@@ -93,8 +94,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
	 * because the register and the memory stack are not
	 * switched atomically.
	 */
-	asm ("mov %0=ar.bsp" : "=r"(bsp));
-	asm ("mov %0=sp" : "=r"(sp));
+	bsp = ia64_getreg(_IA64_REG_AR_BSP);
+	sp = ia64_getreg(_IA64_REG_SP);

	if ((sp - bsp) < 1024) {
		static unsigned char count;

@@ -117,11 +118,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
-	saved_tpr = ia64_get_tpr();
+	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
-			ia64_set_tpr(vector);
+			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			do_IRQ(local_vector_to_irq(vector), regs);

@@ -130,7 +131,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
-			ia64_set_tpr(saved_tpr);
+			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();

@@ -193,7 +194,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
 #ifdef CONFIG_SMP
	phys_cpu_id = cpu_physical_id(cpu);
 #else
-	phys_cpu_id = (ia64_get_lid() >> 16) & 0xffff;
+	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 #endif

	/*
arch/ia64/kernel/mca.c

@@ -505,14 +505,14 @@ ia64_mca_cmc_vector_setup (void)
	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 0;	/* Unmask/enable interrupt */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
-	ia64_set_cmcv(cmcv.cmcv_regval);
+	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected "
		       "machine check vector %#x setup and enabled.\n",
		       smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n",
-		       smp_processor_id(), ia64_get_cmcv());
+		       smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }

 /*

@@ -532,10 +532,10 @@ ia64_mca_cmc_vector_disable (void *dummy)
 {
	cmcv_reg_t	cmcv;

-	cmcv = (cmcv_reg_t)ia64_get_cmcv();
+	cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
-	ia64_set_cmcv(cmcv.cmcv_regval);
+	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
		       "machine check vector %#x disabled.\n",

@@ -559,10 +559,10 @@ ia64_mca_cmc_vector_enable (void *dummy)
 {
	cmcv_reg_t	cmcv;

-	cmcv = (cmcv_reg_t)ia64_get_cmcv();
+	cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
-	ia64_set_cmcv(cmcv.cmcv_regval);
+	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
		       "machine check vector %#x enabled.\n",

@@ -727,10 +727,10 @@ ia64_mca_init(void)
	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
-				       ia64_tpa(ia64_get_gp()),
+				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
-				       ia64_tpa(ia64_get_gp()),
+				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "

@@ -816,16 +816,16 @@ ia64_mca_wakeup_ipi_wait(void)
	do {
		switch (irr_num) {
		      case 0:
-			irr = ia64_get_irr0();
+			irr = ia64_getreg(_IA64_REG_CR_IRR0);
			break;
		      case 1:
-			irr = ia64_get_irr1();
+			irr = ia64_getreg(_IA64_REG_CR_IRR1);
			break;
		      case 2:
-			irr = ia64_get_irr2();
+			irr = ia64_getreg(_IA64_REG_CR_IRR2);
			break;
		      case 3:
-			irr = ia64_get_irr3();
+			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			break;
		}
	} while (!(irr & (1 << irr_bit))) ;
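For the IRR reads above: the 256 external-interrupt pending bits live in four 64-bit control registers, so the bit for a given vector is found in cr.irr[vector/64] at bit position vector%64. A hedged sketch of the lookup the switch implements; the helper name is illustrative only:

	/* Returns nonzero if the given interrupt vector (0..255) is pending. */
	static unsigned long irr_pending_sketch (int vector)
	{
		unsigned long irr;

		switch (vector / 64) {
		case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
		case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
		case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
		default: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
		}
		return irr & (1UL << (vector % 64));
	}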
arch/ia64/kernel/perfmon.c

@@ -46,6 +46,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
+#include <asm/intrinsics.h>

 #ifdef CONFIG_PERFMON
 /*

@@ -679,39 +680,45 @@ static int pfm_end_notify_user(pfm_context_t *ctx);
 static inline void
 pfm_clear_psr_pp(void)
 {
-	__asm__ __volatile__ ("rsm psr.pp;; srlz.i;;"::: "memory");
+	ia64_rsm(IA64_PSR_PP);
+	ia64_srlz_i();
 }

 static inline void
 pfm_set_psr_pp(void)
 {
-	__asm__ __volatile__ ("ssm psr.pp;; srlz.i;;"::: "memory");
+	ia64_ssm(IA64_PSR_PP);
+	ia64_srlz_i();
 }

 static inline void
 pfm_clear_psr_up(void)
 {
-	__asm__ __volatile__ ("rsm psr.up;; srlz.i;;"::: "memory");
+	ia64_rsm(IA64_PSR_UP);
+	ia64_srlz_i();
 }

 static inline void
 pfm_set_psr_up(void)
 {
-	__asm__ __volatile__ ("ssm psr.up;; srlz.i;;"::: "memory");
+	ia64_ssm(IA64_PSR_UP);
+	ia64_srlz_i();
 }

 static inline unsigned long
 pfm_get_psr(void)
 {
	unsigned long tmp;
-	__asm__ __volatile__ ("mov %0=psr;;": "=r"(tmp) :: "memory");
+	tmp = ia64_getreg(_IA64_REG_PSR);
+	ia64_srlz_i();
	return tmp;
 }

 static inline void
 pfm_set_psr_l(unsigned long val)
 {
-	__asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;":: "r"(val): "memory");
+	ia64_setreg(_IA64_REG_PSR_L, val);
+	ia64_srlz_i();
 }

 static inline void

@@ -978,7 +985,8 @@ pfm_restore_monitoring(struct task_struct *task)
	 */
	if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
-		ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();

@@ -1025,7 +1033,8 @@ pfm_restore_monitoring(struct task_struct *task)
	 */
	if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
-		ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);

@@ -1781,7 +1790,8 @@ pfm_syswide_force_stop(void *info)
	/*
	 * Update local PMU
	 */
-	ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+	ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
	ia64_srlz_i();
	/*
	 * update local cpuinfo

@@ -3952,7 +3962,8 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
		 *
		 * disable dcr pp
		 */
-		ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*

@@ -4042,7 +4053,8 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
		pfm_set_psr_pp();

		/* enable dcr pp */
-		ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
		return 0;

@@ -4207,7 +4219,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
		  current->pid, thread->pfm_context, ctx));

-	old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
+	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;

@@ -5467,13 +5479,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
	 * if monitoring has started
	 */
	if (dcr_pp) {
-		dcr = ia64_get_dcr();
+		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
-			ia64_set_dcr(dcr & ~IA64_DCR_PP);
+			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;

@@ -5485,7 +5497,7 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
-		ia64_set_dcr(dcr | IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}

@@ -6265,7 +6277,7 @@ pfm_init_percpu (void)
	if (smp_processor_id() == 0)
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);

-	ia64_set_pmv(IA64_PERFMON_VECTOR);
+	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();

	/*
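The old ia64_get_dcr()/ia64_set_dcr() helpers disappear in favour of an explicit read-modify-write through the generic intrinsics; the srlz.i afterwards is still required before the new DCR setting can be relied on. A hedged sketch of the recurring idiom; the helper name is illustrative only:

	static inline void dcr_update_pp_sketch (int enable)
	{
		unsigned long dcr = ia64_getreg(_IA64_REG_CR_DCR);

		if (enable)
			dcr |= IA64_DCR_PP;
		else
			dcr &= ~IA64_DCR_PP;
		ia64_setreg(_IA64_REG_CR_DCR, dcr);
		ia64_srlz_i();	/* serialize before depending on the change */
	}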
arch/ia64/kernel/setup.c

@@ -741,8 +741,8 @@ cpu_init (void)
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
-	ia64_set_dcr(  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
-		     | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
+	ia64_setreg(_IA64_REG_CR_DCR, (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
+				       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)

@@ -758,11 +758,11 @@ cpu_init (void)
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
-	ia64_set_pmv(1 << 16);
-	ia64_set_cmcv(1 << 16);
+	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
-	ia64_set_tpr(0);
+	ia64_setreg(_IA64_REG_CR_TPR, 0);
 #ifdef CONFIG_SMP
	normal_xtp();
 #endif
arch/ia64/kernel/signal.c

@@ -41,6 +41,8 @@
 # define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])
 #endif

+#include <asm/intrinsics.h>
+#ifdef ASM_SUPPORTED
 register double f16 asm ("f16");	register double f17 asm ("f17");
 register double f18 asm ("f18");	register double f19 asm ("f19");
 register double f20 asm ("f20");	register double f21 asm ("f21");

@@ -50,6 +52,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26");	register double f27 asm ("f27");
 register double f28 asm ("f28");	register double f29 asm ("f29");
 register double f30 asm ("f30");	register double f31 asm ("f31");
+#endif

 long
 ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)

@@ -192,7 +195,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
	      case __SI_TIMER >> 16:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
-		err |= __put_user(from->si_value, &to->si_value);
+		err |= __put_user(from->si_value.sival_ptr, &to->si_value.sival_ptr);
		break;
	      case __SI_CHLD >> 16:
		err |= __put_user(from->si_utime, &to->si_utime);
arch/ia64/kernel/traps.c

@@ -7,6 +7,19 @@
  * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
  */

+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>		/* For unblank_screen() */
+
+#include <asm/hardirq.h>
+#include <asm/ia32.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/fpswa.h>
+
 /*
  * fp_emulate() needs to be able to access and update all floating point registers.  Those
  * saved in pt_regs can be accessed through that structure, but those not saved, will be

@@ -15,6 +28,8 @@
  * by declaring preserved registers that are not marked as "fixed" as global register
  * variables.
  */
+#include <asm/intrinsics.h>
+#ifdef ASM_SUPPORTED
 register double f2 asm ("f2");	register double f3 asm ("f3");
 register double f4 asm ("f4");	register double f5 asm ("f5");

@@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26");	register double f27 asm ("f27");
 register double f28 asm ("f28");	register double f29 asm ("f29");
 register double f30 asm ("f30");	register double f31 asm ("f31");
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>		/* For unblank_screen() */
-
-#include <asm/hardirq.h>
-#include <asm/ia32.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-#include <asm/fpswa.h>
+#endif

 extern spinlock_t timerlist_lock;
arch/ia64/kernel/unaligned.c

@@ -22,6 +22,7 @@
 #include <asm/rse.h>
 #include <asm/processor.h>
 #include <asm/unaligned.h>
+#include <asm/intrinsics.h>

 extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));

@@ -231,7 +232,7 @@ static u16 fr_info[32]={
 static void
 invala_gr (int regno)
 {
-#	define F(reg)	case reg: __asm__ __volatile__ ("invala.e r%0" :: "i"(reg)); break
+#	define F(reg)	case reg: ia64_invala_gr(reg); break

	switch (regno) {
		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);

@@ -258,7 +259,7 @@ invala_gr (int regno)
 static void
 invala_fr (int regno)
 {
-#	define F(reg)	case reg: __asm__ __volatile__ ("invala.e f%0" :: "i"(reg)); break
+#	define F(reg)	case reg: ia64_invala_fr(reg); break

	switch (regno) {
		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);

@@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 static inline void
 float_spill_f0 (struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("stf.spill [%0]=f0" :: "r"(final) : "memory");
+	ia64_stf_spill(final, 0);
 }

 static inline void
 float_spill_f1 (struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("stf.spill [%0]=f1" :: "r"(final) : "memory");
+	ia64_stf_spill(final, 1);
 }

 static void

@@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={
 static inline void
 mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldfe f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldfe(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf8 f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf8(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldfs f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldfs(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldfd f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldfd(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stfe [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stfe(final, 6);
 }

 static inline void
 float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stf8 [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stf8(final, 6);
 }

 static inline void
 float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stfs [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stfs(final, 6);
 }

 static inline void
 float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stfd [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stfd(final, 6);
 }

 static int
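The F() macro and the 128-way switch exist because invala.e encodes its register number as an immediate: ia64_invala_gr(regnum) uses an "i" constraint, so it needs a compile-time constant, and a runtime regno has to be expanded into one case per register. A hedged, shortened sketch; only the first eight cases are shown, the real invala_gr()/invala_fr() cover all 128 registers:

	#define F(reg)	case reg: ia64_invala_gr(reg); break

	static void invala_gr_sketch (int regno)
	{
		switch (regno) {
			F(0); F(1); F(2); F(3); F(4); F(5); F(6); F(7);
		}
	}
	#undef F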
arch/ia64/mm/tlb.c

@@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi
		/*
		 * Flush ALAT entries also.
		 */
-		asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory");
+		ia64_ptcga(start, (nbits<<2));
+		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);
 }

@@ -118,15 +118,13 @@ local_flush_tlb_all (void)
	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
-			asm volatile ("ptc.e %0" :: "r"(addr));
+			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
-	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
-	ia64_insn_group_barrier();
 }

 void

@@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
		platform_global_tlb_purge(start, end, nbits);
 # else
	do {
-		asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
 # endif
-	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
-	ia64_insn_group_barrier();
 }

 void __init
arch/ia64/sn/fakeprom/fw-emu.c

@@ -200,7 +200,7 @@ efi_unimplemented (void)
 #ifdef SGI_SN2
 #undef cpu_physical_id
-#define cpu_physical_id(cpuid)	((ia64_get_lid() >> 16) & 0xffff)
+#define cpu_physical_id(cpuid)	((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)

 void
 fprom_send_cpei(void) {

@@ -224,14 +224,14 @@ fprom_send_cpei(void) {
 #endif

-static long
+static struct sal_ret_values
 sal_emulator (long index, unsigned long in1, unsigned long in2,
	      unsigned long in3, unsigned long in4, unsigned long in5,
	      unsigned long in6, unsigned long in7)
 {
-	register long r9 asm ("r9") = 0;
-	register long r10 asm ("r10") = 0;
-	register long r11 asm ("r11") = 0;
+	long r9  = 0;
+	long r10 = 0;
+	long r11 = 0;
	long status;

	/*

@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
	}

-	asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
-	return status;
+	return ((struct sal_ret_values) {status, r9, r10, r11});
 }
arch/ia64/sn/kernel/irq.c

@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) {
	irr_bit = irq_to_vector(irq) % 64;
	switch (irr_reg_num) {
		case 0:
-			irr_reg = ia64_get_irr0();
+			irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
			break;
		case 1:
-			irr_reg = ia64_get_irr1();
+			irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
			break;
		case 2:
-			irr_reg = ia64_get_irr2();
+			irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
			break;
		case 3:
-			irr_reg = ia64_get_irr3();
+			irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
			break;
	}
	if (!test_bit(irr_bit, &irr_reg) ) {

@@ -354,9 +354,9 @@ sn_get_next_bit(void) {
 void
 sn_set_tpr(int vector) {
	if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
-		ia64_set_tpr(vector);
+		ia64_setreg(_IA64_REG_CR_TPR, vector);
	} else {
-		ia64_set_tpr(IA64_LAST_DEVICE_VECTOR);
+		ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR);
	}
 }
arch/ia64/sn/kernel/setup.c

@@ -395,7 +395,7 @@ sn_cpu_init(void)
		return;

	cpuid = smp_processor_id();
-	cpuphyid = ((ia64_get_lid() >> 16) & 0xffff);
+	cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff);
	nasid = cpu_physical_id_to_nasid(cpuphyid);
	cnode = nasid_to_cnodeid(nasid);
	slice = cpu_physical_id_to_slice(cpuphyid);
arch/ia64/sn/kernel/sn2/io.c

@@ -11,81 +11,73 @@
 #include <asm/sn/sn2/io.h>

+#undef __sn_inb
+#undef __sn_inw
+#undef __sn_inl
+#undef __sn_outb
+#undef __sn_outw
+#undef __sn_outl
+#undef __sn_readb
+#undef __sn_readw
+#undef __sn_readl
+#undef __sn_readq
+
 unsigned int
-sn_inb (unsigned long port)
+__sn_inb (unsigned long port)
 {
-	return __sn_inb(port);
+	return ___sn_inb(port);
 }

 unsigned int
-sn_inw (unsigned long port)
+__sn_inw (unsigned long port)
 {
-	return __sn_inw(port);
+	return ___sn_inw(port);
 }

 unsigned int
-sn_inl (unsigned long port)
+__sn_inl (unsigned long port)
 {
-	return __sn_inl(port);
+	return ___sn_inl(port);
 }

 void
-sn_outb (unsigned char val, unsigned long port)
+__sn_outb (unsigned char val, unsigned long port)
 {
-	__sn_outb(val, port);
+	___sn_outb(val, port);
 }

 void
-sn_outw (unsigned short val, unsigned long port)
+__sn_outw (unsigned short val, unsigned long port)
 {
-	__sn_outw(val, port);
+	___sn_outw(val, port);
 }

 void
-sn_outl (unsigned int val, unsigned long port)
+__sn_outl (unsigned int val, unsigned long port)
 {
-	__sn_outl(val, port);
+	___sn_outl(val, port);
 }

 unsigned char
-sn_readb (void *addr)
+__sn_readb (void *addr)
 {
-	return __sn_readb(addr);
+	return ___sn_readb(addr);
 }

 unsigned short
-sn_readw (void *addr)
+__sn_readw (void *addr)
 {
-	return __sn_readw(addr);
+	return ___sn_readw(addr);
 }

 unsigned int
-sn_readl (void *addr)
+__sn_readl (void *addr)
 {
-	return __sn_readl(addr);
+	return ___sn_readl(addr);
 }

 unsigned long
-sn_readq (void *addr)
+__sn_readq (void *addr)
 {
-	return __sn_readq(addr);
+	return ___sn_readq(addr);
 }
-
-/* define aliases: */
-
-asm (".global __sn_inb, __sn_inw, __sn_inl");
-asm ("__sn_inb = sn_inb");
-asm ("__sn_inw = sn_inw");
-asm ("__sn_inl = sn_inl");
-
-asm (".global __sn_outb, __sn_outw, __sn_outl");
-asm ("__sn_outb = sn_outb");
-asm ("__sn_outw = sn_outw");
-asm ("__sn_outl = sn_outl");
-
-asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
-asm ("__sn_readb = sn_readb");
-asm ("__sn_readw = sn_readw");
-asm ("__sn_readl = sn_readl");
-asm ("__sn_readq = sn_readq");
arch/ia64/vmlinux.lds.S

@@ -35,6 +35,7 @@ SECTIONS
	{
		*(.text.ivt)
		*(.text)
+		*(.gnu.linkonce.t*)
	}
  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
	{ *(.text2) }

@@ -183,7 +184,7 @@ SECTIONS
  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits into percpu page size */

  .data : AT(ADDR(.data) - LOAD_OFFSET)
-	{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
+	{ *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }

  . = ALIGN(16);
  __gp = . + 0x200000;	/* gp must be 16-byte aligned for exc. table */

@@ -194,7 +195,7 @@ SECTIONS
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size.  */
  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
-	{ *(.sdata) }
+	{ *(.sdata) *(.sdata1) *(.srdata) }
  _edata  =  .;
  _bss = .;
  .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
include/asm-ia64/atomic.h

@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v)
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
-	} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
 }

@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
-	} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
 }

@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v)
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
-	} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
 }

@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
-	} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
 }
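The quotes around "acq" disappear because the ordering tag is no longer spliced into an asm template string; it is now a bare token that the intrinsics layer presumably pastes into an intrinsic name (something like an acquire-ordered cmpxchg of the matching width). A hedged usage sketch, essentially restating ia64_atomic_add() from the hunk above:

	static int atomic_add_sketch (int i, atomic_t *v)
	{
		__s32 old, new;

		do {
			old = atomic_read(v);
			new = old + i;
			/* "acq" is a bare token now: the macro selects the
			 * acquire variant of the right operand width. */
		} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
		return new;
	}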
include/asm-ia64/bitops.h

@@ -292,7 +292,7 @@ ffz (unsigned long x)
 {
	unsigned long result;

-	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1)));
+	result = ia64_popcnt(x & (~x - 1));
	return result;
 }

@@ -307,7 +307,7 @@ __ffs (unsigned long x)
 {
	unsigned long result;

-	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x));
+	result = ia64_popcnt((x - 1) & ~x);
	return result;
 }

@@ -323,7 +323,7 @@ ia64_fls (unsigned long x)
	long double d = x;
	long exp;

-	__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
+	exp = ia64_getf_exp(d);
	return exp - 0xffff;
 }

@@ -349,7 +349,7 @@ static __inline__ unsigned long
 hweight64 (unsigned long x)
 {
	unsigned long result;
-	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x));
+	result = ia64_popcnt(x);
	return result;
 }
include/asm-ia64/byteorder.h

@@ -7,13 +7,14 @@
  */

 #include <asm/types.h>
+#include <asm/intrinsics.h>

 static __inline__ __const__ __u64
 __ia64_swab64 (__u64 x)
 {
	__u64 result;

-	__asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x));
+	result = ia64_mux1(x, ia64_mux1_rev);
	return result;
 }
include/asm-ia64/current.h

@@ -6,8 +6,9 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */

+#include <asm/intrinsics.h>
+
 /* In kernel mode, thread pointer (r13) is used to point to the current task structure.  */
-register struct task_struct *current asm ("r13");
+#define current	((struct task_struct *) ia64_getreg(_IA64_REG_TP))

 #endif /* _ASM_IA64_CURRENT_H */
include/asm-ia64/delay.h
View file @
708df1b0
...
...
@@ -18,11 +18,13 @@
#include <linux/compiler.h>
#include <asm/processor.h>
#include <asm/intrinsics.h>
static
__inline__
void
ia64_set_itm
(
unsigned
long
val
)
{
__asm__
__volatile__
(
"mov cr.itm=%0;; srlz.d;;"
::
"r"
(
val
)
:
"memory"
);
ia64_setreg
(
_IA64_REG_CR_ITM
,
val
);
ia64_srlz_d
();
}
static
__inline__
unsigned
long
...
...
@@ -30,20 +32,23 @@ ia64_get_itm (void)
 {
 	unsigned long result;
 
-	__asm__ __volatile__ ("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory");
+	result = ia64_getreg(_IA64_REG_CR_ITM);
+	ia64_srlz_d();
 	return result;
 }
 
 static __inline__ void
 ia64_set_itv (unsigned long val)
 {
-	__asm__ __volatile__ ("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory");
+	ia64_setreg(_IA64_REG_CR_ITV, val);
+	ia64_srlz_d();
 }
 
 static __inline__ void
 ia64_set_itc (unsigned long val)
 {
-	__asm__ __volatile__ ("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
+	ia64_setreg(_IA64_REG_AR_ITC, val);
+	ia64_srlz_d();
 }
 
 static __inline__ unsigned long
...
...
@@ -51,10 +56,13 @@ ia64_get_itc (void)
 {
 	unsigned long result;
 
-	__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(result) :: "memory");
+	result = ia64_getreg(_IA64_REG_AR_ITC);
+	ia64_barrier();
+
 #ifdef CONFIG_ITANIUM
-	while (unlikely((__s32) result == -1))
-		__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(result) :: "memory");
+	while (unlikely((__s32) result == -1)) {
+		result = ia64_getreg(_IA64_REG_AR_ITC);
+		ia64_barrier();
+	}
 #endif
 	return result;
 }
...
...
@@ -62,15 +70,11 @@ ia64_get_itc (void)
 static __inline__ void
 __delay (unsigned long loops)
 {
-	unsigned long saved_ar_lc;
-
-	if (loops < 1)
-		return;
-
-	__asm__ __volatile__ ("mov %0=ar.lc;;" : "=r"(saved_ar_lc));
-	__asm__ __volatile__ ("mov ar.lc=%0;;" :: "r"(loops - 1));
-	__asm__ __volatile__ ("1:\tbr.cloop.sptk.few 1b;;");
-	__asm__ __volatile__ ("mov ar.lc=%0" :: "r"(saved_ar_lc));
+	for (; loops--;)
+		ia64_nop(0);
 }
 
 static __inline__ void
...
...
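The new __delay simply burns 'loops' iterations with ia64_nop(0) instead of programming the ar.lc loop counter. Calibrated delays are normally built on the cycle counter read by ia64_get_itc() above; a rough portable sketch of that idiom, with clock() standing in for the itc read (hypothetical helper, not kernel code):

#include <time.h>

static void delay_cycles (unsigned long cycles)
{
	clock_t start = clock();	/* ia64_get_itc() in the real code */

	/* Spin until the counter has advanced by 'cycles' ticks. */
	while ((unsigned long)(clock() - start) < cycles)
		;			/* cpu_relax() would go here */
}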
include/asm-ia64/gcc_intrin.h
0 → 100644
#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*
*/
/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier() __asm__ __volatile__ ("":::"memory")
#define ia64_stop() __asm__ __volatile__ (";;"::)
#define ia64_invala_gr(regnum) \
__asm__ __volatile__ ("invala.e r%0" :: "i"(regnum))
#define ia64_invala_fr(regnum) \
__asm__ __volatile__ ("invala.e f%0" :: "i"(regnum))
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
#define ia64_setreg(regnum, val) \
({ \
switch (regnum) { \
case _IA64_REG_PSR_L: \
__asm__ __volatile__ ("mov psr.l=%0" :: "r"(val) : "memory"); \
break; \
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
__asm__ __volatile__ ("mov ar%0=%1" :: \
"i" (regnum - _IA64_REG_AR_KR0), \
"r"(val): "memory"); \
break; \
case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
__asm__ __volatile__ ("mov cr%0=%1" :: \
"i" (regnum - _IA64_REG_CR_DCR), \
"r"(val): "memory" ); \
break; \
case _IA64_REG_SP: \
__asm__ __volatile__ ("mov r12=%0" :: \
"r"(val): "memory"); \
break; \
case _IA64_REG_GP: \
__asm__ __volatile__ ("mov gp=%0" :: "r"(val) : "memory"); \
break; \
default: \
ia64_bad_param_for_setreg(); \
break; \
} \
})
#define ia64_getreg(regnum) \
({ \
__u64 ia64_intri_res; \
\
switch (regnum) { \
case _IA64_REG_GP: \
__asm__ __volatile__ ("mov %0=gp" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_IP: \
__asm__ __volatile__ ("mov %0=ip" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_PSR: \
__asm__ __volatile__ ("mov %0=psr" : "=r"(ia64_intri_res));\
break; \
	case _IA64_REG_TP:	/* for current() */				\
{ \
register __u64 ia64_r13 asm ("r13"); \
ia64_intri_res = ia64_r13; \
} \
break; \
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
__asm__ __volatile__ ("mov %0=ar%1" : "=r" (ia64_intri_res) \
: "i"(regnum - _IA64_REG_AR_KR0)); \
break; \
case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
__asm__ __volatile__ ("mov %0=cr%1" : "=r" (ia64_intri_res) \
: "i" (regnum - _IA64_REG_CR_DCR)); \
break; \
case _IA64_REG_SP: \
__asm__ __volatile__ ("mov %0=sp" : "=r" (ia64_intri_res)); \
break; \
default: \
ia64_bad_param_for_getreg(); \
break; \
} \
ia64_intri_res; \
})
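Because regnum is always a compile-time constant, the compiler folds each switch down to the single mov the old inline asm used to spell out. A hypothetical use (the register names are real, the helper itself is only an illustration):

static inline unsigned long stack_gp_cookie (void)
{
	unsigned long sp = ia64_getreg(_IA64_REG_SP);	/* compiles to: mov %0=sp */
	unsigned long gp = ia64_getreg(_IA64_REG_GP);	/* compiles to: mov %0=gp */

	return sp ^ gp;
}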
#define ia64_hint_pause 0
#define ia64_hint(mode) \
({ \
switch (mode) { \
case ia64_hint_pause: \
asm volatile ("hint @pause" ::: "memory"); \
break; \
} \
})
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11
#define ia64_mux1(x, mode) \
({ \
__u64 ia64_intri_res; \
\
switch (mode) { \
case ia64_mux1_brcst: \
__asm__ ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_mix: \
__asm__ ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_shuf: \
__asm__ ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_alt: \
__asm__ ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_rev: \
__asm__ ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
} \
ia64_intri_res; \
})
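The @rev case is the one the byteorder.h change above relies on: mux1 with the @rev permutation reverses the eight bytes of a 64-bit value. Illustrative helper (hypothetical name):

static inline __u64 bswap64 (__u64 x)
{
	/* 0x0102030405060708 -> 0x0807060504030201 */
	return ia64_mux1(x, ia64_mux1_rev);
}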
#define ia64_popcnt(x) \
({ \
__u64 ia64_intri_res; \
__asm__ ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
\
ia64_intri_res; \
})
#define ia64_getf_exp(x) \
({ \
long ia64_intri_res; \
\
__asm__ ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
\
ia64_intri_res; \
})
#define ia64_shrp(a, b, count) \
({ \
__u64 ia64_intri_res; \
__asm__ ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
ia64_intri_res; \
})
#define ia64_ldfs(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldfd(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldfe(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldf8(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldf_fill(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_stfs(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stfd(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stfe(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stf8(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stf_spill(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_fetchadd4_acq(p, inc) \
({ \
\
__u64 ia64_intri_res; \
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd4_rel(p, inc) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd8_acq(p, inc) \
({ \
\
__u64 ia64_intri_res; \
__asm__ __volatile__ ("fetchadd8.acq %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd8_rel(p, inc) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_xchg1(ptr,x) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg2(ptr,x) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg4(ptr,x) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg8(ptr,x) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
__asm__ __volatile__ ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
__asm__ __volatile__ ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
__asm__ __volatile__ ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
__asm__ __volatile__ ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
__asm__ __volatile__ ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
__asm__ __volatile__ ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
__asm__ __volatile__ ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
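Note the argument order: (ptr, new, old), with the expected old value parked in ar.ccv by the first asm statement. A sketch of a lock-free 64-bit increment built directly on these macros (illustration only; the kernel's atomics reach them through ia64_cmpxchg):

static inline __u64 lockfree_inc64 (__u64 *p)
{
	__u64 old, new;

	do {
		old = *p;
		new = old + 1;
	} while (ia64_cmpxchg8_acq(p, new, old) != old);
	return new;
}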
#define ia64_mf() __asm__ __volatile__ ("mf" ::: "memory")
#define ia64_mfa() __asm__ __volatile__ ("mf.a" ::: "memory")
#define ia64_invala() __asm__ __volatile__ ("invala" ::: "memory")
#define ia64_thash(addr) \
({ \
__u64 ia64_intri_res; \
__asm__ __volatile__ ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
ia64_intri_res; \
})
#define ia64_srlz_i() __asm__ __volatile__ (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d() __asm__ __volatile__ (";; srlz.d" ::: "memory");
#define ia64_nop(x) __asm__ __volatile__ ("nop %0"::"i"(x));
#define ia64_itci(addr) __asm__ __volatile__ ("itc.i %0;;" :: "r"(addr) : "memory")
#define ia64_itcd(addr) __asm__ __volatile__ ("itc.d %0;;" :: "r"(addr) : "memory")
#define ia64_itri(trnum, addr) __asm__ __volatile__ ("itr.i itr[%0]=%1" \
:: "r"(trnum), "r"(addr) : "memory")
#define ia64_itrd(trnum, addr) __asm__ __volatile__ ("itr.d dtr[%0]=%1" \
:: "r"(trnum), "r"(addr) : "memory")
#define ia64_tpa(addr) \
({ \
__u64 ia64_pa; \
__asm__ __volatile__ ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : \
"memory"); \
ia64_pa; \
})
#define __ia64_set_dbr(index, val) \
__asm__ __volatile__ ("mov dbr[%0]=%1" :: "r"(index), "r"(val) \
: "memory")
#define ia64_set_ibr(index, val) \
__asm__ __volatile__ ("mov ibr[%0]=%1" :: "r"(index), "r"(val) \
: "memory")
#define ia64_set_pkr(index, val) \
__asm__ __volatile__ ("mov pkr[%0]=%1" :: "r"(index), "r"(val) \
: "memory")
#define ia64_set_pmc(index, val) \
__asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(index), "r"(val) \
: "memory");
#define ia64_set_pmd(index, val) \
__asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(index), "r"(val) \
: "memory");
#define ia64_set_rr(index, val) \
__asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(index), "r"(val) \
: "memory");
#define ia64_get_cpuid(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
\
ia64_intri_res; \
})
#define __ia64_get_dbr(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
\
ia64_intri_res; \
})
#define ia64_get_ibr(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
\
ia64_intri_res; \
})
#define ia64_get_pkr(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
\
ia64_intri_res; \
})
#define ia64_get_pmc(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
\
ia64_intri_res; \
})
#define ia64_get_pmd(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
\
ia64_intri_res; \
})
#define ia64_get_rr(index) \
({ \
__u64 ia64_intri_res; \
\
__asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" \
(index)); \
\
ia64_intri_res; \
})
#define ia64_fc(addr) \
__asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory");
#define ia64_sync_i() \
__asm__ __volatile__ (";; sync.i" ::: "memory")
#define ia64_ssm(mask) __asm__ __volatile__ ("ssm %0":: "i"((mask)) : "memory");
#define ia64_rsm(mask) __asm__ __volatile__ ("rsm %0":: "i"((mask)) : "memory");
#define ia64_sum(mask) __asm__ __volatile__ ("sum %0":: "i"((mask)) : "memory");
#define ia64_rum(mask) __asm__ __volatile__ ("rum %0":: "i"((mask)) : "memory");
#define ia64_ptce(addr) \
__asm__ __volatile__ ("ptc.e %0" :: "r"(addr))
#define ia64_ptcga(addr, size) \
__asm__ __volatile__ ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptcl(addr, size) \
__asm__ __volatile__ ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptri(addr, size) \
__asm__ __volatile__ ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptrd(addr, size) \
__asm__ __volatile__ ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
#define ia64_lfetch(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
__asm__ __volatile__ ("lfetch [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt1: \
__asm__ __volatile__ ("lfetch.nt1 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt2: \
__asm__ __volatile__ ("lfetch.nt2 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nta: \
__asm__ __volatile__ ("lfetch.nta [%0]" : : "r"(y)); \
break; \
} \
})
#define ia64_lfetch_excl(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
__asm__ __volatile__ ("lfetch.excl [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt1: \
__asm__ __volatile__ ("lfetch.excl.nt1 [%0]" :: "r"(y));\
break; \
case ia64_lfhint_nt2: \
__asm__ __volatile__ ("lfetch.excl.nt2 [%0]" :: "r"(y));\
break; \
case ia64_lfhint_nta: \
__asm__ __volatile__ ("lfetch.excl.nta [%0]" :: "r"(y));\
break; \
} \
})
#define ia64_lfetch_fault(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
__asm__ __volatile__ ("lfetch.fault [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt1: \
__asm__ __volatile__ ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt2: \
__asm__ __volatile__ ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nta: \
__asm__ __volatile__ ("lfetch.fault.nta [%0]" : : "r"(y)); \
break; \
} \
})
#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
__asm__ __volatile__ ("lfetch.fault.excl [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt1: \
__asm__ __volatile__ ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt2: \
__asm__ __volatile__ ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nta: \
__asm__ __volatile__ ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
break; \
} \
})
#define ia64_intrin_local_irq_restore(x) \
do { \
__asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"(p6) srlz.d" \
: : "r"((x)) \
: "p6", "p7", "memory"); \
} while (0)
#endif
/* _ASM_IA64_GCC_INTRIN_H */
include/asm-ia64/ia64regs.h
0 → 100644
/*
* Copyright (C) 2002,2003 Intel Corp.
* Jun Nakajima <jun.nakajima@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
*/
#ifndef _ASM_IA64_IA64REGS_H
#define _ASM_IA64_IA64REGS_H
/*
** Register Names for getreg() and setreg()
*/
/* Special Registers */
#define _IA64_REG_IP		1016	/* getreg only */
#define _IA64_REG_PSR 1019
#define _IA64_REG_PSR_L 1019
/* General Integer Registers */
#define _IA64_REG_GP		1025	/* R1 */
#define _IA64_REG_R8		1032	/* R8 */
#define _IA64_REG_R9		1033	/* R9 */
#define _IA64_REG_SP		1036	/* R12 */
#define _IA64_REG_TP		1037	/* R13 */
/* Application Registers */
#define _IA64_REG_AR_KR0 3072
#define _IA64_REG_AR_KR1 3073
#define _IA64_REG_AR_KR2 3074
#define _IA64_REG_AR_KR3 3075
#define _IA64_REG_AR_KR4 3076
#define _IA64_REG_AR_KR5 3077
#define _IA64_REG_AR_KR6 3078
#define _IA64_REG_AR_KR7 3079
#define _IA64_REG_AR_RSC 3088
#define _IA64_REG_AR_BSP 3089
#define _IA64_REG_AR_BSPSTORE 3090
#define _IA64_REG_AR_RNAT 3091
#define _IA64_REG_AR_FCR 3093
#define _IA64_REG_AR_EFLAG 3096
#define _IA64_REG_AR_CSD 3097
#define _IA64_REG_AR_SSD 3098
#define _IA64_REG_AR_CFLAG 3099
#define _IA64_REG_AR_FSR 3100
#define _IA64_REG_AR_FIR 3101
#define _IA64_REG_AR_FDR 3102
#define _IA64_REG_AR_CCV 3104
#define _IA64_REG_AR_UNAT 3108
#define _IA64_REG_AR_FPSR 3112
#define _IA64_REG_AR_ITC 3116
#define _IA64_REG_AR_PFS 3136
#define _IA64_REG_AR_LC 3137
#define _IA64_REG_AR_EC 3138
/* Control Registers */
#define _IA64_REG_CR_DCR 4096
#define _IA64_REG_CR_ITM 4097
#define _IA64_REG_CR_IVA 4098
#define _IA64_REG_CR_PTA 4104
#define _IA64_REG_CR_IPSR 4112
#define _IA64_REG_CR_ISR 4113
#define _IA64_REG_CR_IIP 4115
#define _IA64_REG_CR_IFA 4116
#define _IA64_REG_CR_ITIR 4117
#define _IA64_REG_CR_IIPA 4118
#define _IA64_REG_CR_IFS 4119
#define _IA64_REG_CR_IIM 4120
#define _IA64_REG_CR_IHA 4121
#define _IA64_REG_CR_LID 4160
#define _IA64_REG_CR_IVR	4161	/* getreg only */
#define _IA64_REG_CR_TPR 4162
#define _IA64_REG_CR_EOI 4163
#define _IA64_REG_CR_IRR0	4164	/* getreg only */
#define _IA64_REG_CR_IRR1	4165	/* getreg only */
#define _IA64_REG_CR_IRR2	4166	/* getreg only */
#define _IA64_REG_CR_IRR3	4167	/* getreg only */
#define _IA64_REG_CR_ITV 4168
#define _IA64_REG_CR_PMV 4169
#define _IA64_REG_CR_CMCV 4170
#define _IA64_REG_CR_LRR0 4176
#define _IA64_REG_CR_LRR1 4177
/* Indirect Registers for getindreg() and setindreg() */
#define _IA64_REG_INDR_CPUID	9000	/* getindreg only */
#define _IA64_REG_INDR_DBR 9001
#define _IA64_REG_INDR_IBR 9002
#define _IA64_REG_INDR_PKR 9003
#define _IA64_REG_INDR_PMC 9004
#define _IA64_REG_INDR_PMD 9005
#define _IA64_REG_INDR_RR 9006
#endif
/* _ASM_IA64_IA64REGS_H */
include/asm-ia64/intrinsics.h
...
...
@@ -8,8 +8,17 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef __ASSEMBLY__
#include <linux/config.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
#include <asm/intel_intrin.h>
#else
#include <asm/gcc_intrin.h>
#endif
/*
* Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value.
...
...
@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \
switch (sz) { \
case 4: \
__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
break; \
\
case 8: \
__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
break; \
\
default: \
...
...
@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 	(__typeof__(*(v))) (_tmp);	/* return old value */			\
 })
 
-#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, "rel") + (i))	/* return new value */
+#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i))	/* return new value */
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalid xchg().
  */
-extern void __xchg_called_with_bad_pointer (void);
-
-static __inline__ unsigned long
-__xchg (unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long result;
-
-	switch (size) {
-	      case 1:
-		__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 2:
-		__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 4:
-		__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 8:
-		__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
+extern void ia64_xchg_called_with_bad_pointer (void);
#define __xchg(x,ptr,size) \
({ \
unsigned long __xchg_result; \
\
switch (size) { \
case 1: \
__xchg_result = ia64_xchg1((__u8 *)ptr, x); \
break; \
\
case 2: \
__xchg_result = ia64_xchg2((__u16 *)ptr, x); \
break; \
\
case 4: \
__xchg_result = ia64_xchg4((__u32 *)ptr, x); \
break; \
\
case 8: \
__xchg_result = ia64_xchg8((__u64 *)ptr, x); \
break; \
default: \
ia64_xchg_called_with_bad_pointer(); \
} \
__xchg_result; \
})
#define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
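Since xchg() returns the previous value and has acquire semantics on ia64, it is already enough for a tiny test-and-set lock; a sketch of the usage pattern (illustration only, not the kernel's spinlock):

static inline void toy_lock (volatile unsigned int *lock)
{
	while (xchg(lock, 1) != 0)	/* got 1 back: someone else holds it */
		while (*lock)		/* spin on a plain read, not on xchg */
			;
}

static inline void toy_unlock (volatile unsigned int *lock)
{
	*lock = 0;	/* a real unlock needs release semantics */
}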
...
...
@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size)
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
-extern long __cmpxchg_called_with_bad_pointer(void);
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
__typeof__(ptr) _p_ = (ptr); \
__typeof__(new) _n_ = (new); \
__u64 _o_, _r_; \
\
switch (size) { \
...
...
@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void);
case 8: _o_ = (__u64) (long) (old); break; \
default: break; \
} \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
switch (size) { \
case 1: \
__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
break; \
\
case 2: \
__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
break; \
\
case 4: \
__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
break; \
\
case 8: \
__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
break; \
\
default: \
-		_r_ = __cmpxchg_called_with_bad_pointer();			\
+		_r_ = ia64_cmpxchg_called_with_bad_pointer();			\
break; \
} \
(__typeof__(old)) _r_; \
})
-#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
...
...
@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
if (_cmpxchg_bugcheck_count-- <= 0) { \
void *ip; \
extern int printk(const char *fmt, ...); \
-		asm ("mov %0=ip" : "=r"(ip));					\
+		ip = ia64_getreg(_IA64_REG_IP);					\
printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
break; \
} \
...
...
@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void);
# define CMPXCHG_BUGCHECK(v)
#endif
/* !CONFIG_IA64_DEBUG_CMPXCHG */
#endif
#endif
/* _ASM_IA64_INTRINSICS_H */
include/asm-ia64/io.h
...
...
@@ -55,6 +55,7 @@ extern unsigned int num_io_spaces;
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/intrinsics.h>
/*
* Change virtual addresses to physical addresses and vv.
...
...
@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address)
* Memory fence w/accept. This should never be used in code that is
* not IA-64 specific.
*/
-#define __ia64_mf_a()	__asm__ __volatile__ ("mf.a" ::: "memory")
+#define __ia64_mf_a()	ia64_mfa()
 
 static inline const unsigned long
 __ia64_get_io_port_base (void)
...
...
include/asm-ia64/machvec.h
...
...
@@ -155,7 +155,7 @@ struct ia64_machine_vector {
 	ia64_mv_readw_t *readw;
 	ia64_mv_readl_t *readl;
 	ia64_mv_readq_t *readq;
-};
+} __attribute__ ((__aligned__ (16))); /* align attrib?  see above comment */
#define MACHVEC_INIT(name) \
{ \
...
...
include/asm-ia64/mmu_context.h
...
...
@@ -158,9 +158,7 @@ reload_context (mm_context_t context)
 	ia64_set_rr(0x4000000000000000, rr2);
 	ia64_set_rr(0x6000000000000000, rr3);
 	ia64_set_rr(0x8000000000000000, rr4);
-	ia64_insn_group_barrier();
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
-	ia64_insn_group_barrier();
 }
 
 static inline void
...
...
include/asm-ia64/page.h
...
...
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <asm/types.h>
#include <asm/intrinsics.h>
/*
* PAGE_SHIFT determines the actual kernel page size.
...
...
@@ -143,7 +144,7 @@ get_order (unsigned long size)
 	double d = size - 1;
 	long order;
 
-	__asm__ ("getf.exp %0=%1" : "=r" (order) : "f" (d));
+	order = ia64_getf_exp(d);
 	order = order - PAGE_SHIFT - 0xffff + 1;
 	if (order < 0)
 		order = 0;
...
...
include/asm-ia64/pal.h
...
...
@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector
 /* Initialize the processor controlled caches */
 static inline s64
-ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict)
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict);
+	PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
 	return iprv.status;
 }
...
...
include/asm-ia64/processor.h
...
...
@@ -18,6 +18,7 @@
#include <asm/ptrace.h>
#include <asm/kregs.h>
#include <asm/ustack.h>
#include <asm/intrinsics.h>
#define IA64_NUM_DBG_REGS 8
/*
...
...
@@ -356,38 +357,42 @@ extern unsigned long get_wchan (struct task_struct *p);
/* Return stack pointer of blocked task TSK. */
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
-static inline unsigned long
-ia64_get_kr (unsigned long regnum)
-{
-	unsigned long r = 0;
-
-	switch (regnum) {
-	      case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
-	      case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break;
-	      case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break;
-	      case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break;
-	      case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break;
-	      case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break;
-	      case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break;
-	      case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break;
-	}
-	return r;
-}
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum)					\
+({								\
+	unsigned long r = 0;					\
+								\
+	switch (regnum) {					\
+	case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
+	case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
+	case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
+	case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
+	case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
+	case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
+	case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
+	case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
+	default: ia64_getreg_unknown_kr(); break;		\
+	}							\
+	r;							\
+})
 
-static inline void
-ia64_set_kr (unsigned long regnum, unsigned long r)
-{
-	switch (regnum) {
-	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break;
-	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break;
-	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break;
-	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break;
-	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break;
-	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break;
-	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break;
-	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break;
-	}
-}
+#define ia64_set_kr(regnum, r)					\
+({								\
+	switch (regnum) {					\
+	case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
+	case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
+	case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
+	case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
+	case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
+	case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
+	case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
+	case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
+	default: ia64_setreg_unknown_kr(); break;		\
+	}							\
+})
/*
* The following three macros can't be inline functions because we don't have struct
...
...
@@ -423,8 +428,8 @@ extern void ia32_save_state (struct task_struct *task);
 extern void ia32_load_state (struct task_struct *task);
 #endif
 
-#define ia64_fph_enable()	asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
-#define ia64_fph_disable()	asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
+#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
 
 /* load fp 0.0 into fph */
 static inline void
...
...
@@ -450,78 +455,14 @@ ia64_load_fpu (struct ia64_fpreg *fph) {
 	ia64_fph_disable();
 }
 
-static inline void
-ia64_fc (void *addr)
-{
-	asm volatile ("fc %0" :: "r"(addr) : "memory");
-}
-
-static inline void
-ia64_sync_i (void)
-{
-	asm volatile (";; sync.i" ::: "memory");
-}
-
-static inline void
-ia64_srlz_i (void)
-{
-	asm volatile (";; srlz.i ;;" ::: "memory");
-}
-
-static inline void
-ia64_srlz_d (void)
-{
-	asm volatile (";; srlz.d" ::: "memory");
-}
-
-static inline __u64
-ia64_get_rr (__u64 reg_bits)
-{
-	__u64 r;
-	asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
-	return r;
-}
-
-static inline void
-ia64_set_rr (__u64 reg_bits, __u64 rr_val)
-{
-	asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
-}
-
-static inline __u64
-ia64_get_dcr (void)
-{
-	__u64 r;
-	asm volatile ("mov %0=cr.dcr" : "=r"(r));
-	return r;
-}
-
-static inline void
-ia64_set_dcr (__u64 val)
-{
-	asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
-	ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_lid (void)
-{
-	__u64 r;
-	asm volatile ("mov %0=cr.lid" : "=r"(r));
-	return r;
-}
-
-static inline void
-ia64_invala (void)
-{
-	asm volatile ("invala" ::: "memory");
-}
-
 static inline __u64
 ia64_clear_ic (void)
 {
 	__u64 psr;
-	asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
+	psr = ia64_getreg(_IA64_REG_PSR);
+	ia64_stop();
+	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+	ia64_srlz_i();
 	return psr;
 }
...
...
@@ -531,7 +472,9 @@ ia64_clear_ic (void)
 static inline void
 ia64_set_psr (__u64 psr)
 {
-	asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
+	ia64_stop();
+	ia64_setreg(_IA64_REG_PSR_L, psr);
+	ia64_srlz_d();
 }
/*
...
...
@@ -543,14 +486,13 @@ ia64_itr (__u64 target_mask, __u64 tr_num,
 	  __u64 vmaddr, __u64 pte,
 	  __u64 log_page_size)
 {
-	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
-	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+	ia64_stop();
 	if (target_mask & 0x1)
-		asm volatile ("itr.i itr[%0]=%1"
-			      :: "r"(tr_num), "r"(pte) : "memory");
+		ia64_itri(tr_num, pte);
 	if (target_mask & 0x2)
-		asm volatile (";;itr.d dtr[%0]=%1"
-			      :: "r"(tr_num), "r"(pte) : "memory");
+		ia64_itrd(tr_num, pte);
 }
/*
...
...
@@ -561,13 +503,14 @@ static inline void
 ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
 	  __u64 log_page_size)
 {
-	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
-	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+	ia64_stop();
 	/* as per EAS2.6, itc must be the last instruction in an instruction group */
 	if (target_mask & 0x1)
-		asm volatile ("itc.i %0;;" :: "r"(pte) : "memory");
+		ia64_itci(pte);
 	if (target_mask & 0x2)
-		asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory");
+		ia64_itcd(pte);
 }
/*
...
...
@@ -578,16 +521,17 @@ static inline void
 ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
 {
 	if (target_mask & 0x1)
-		asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+		ia64_ptri(vmaddr, (log_size << 2));
 	if (target_mask & 0x2)
-		asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+		ia64_ptrd(vmaddr, (log_size << 2));
 }
 
 /* Set the interrupt vector address.  The address must be suitably aligned (32KB). */
 static inline void
 ia64_set_iva (void *ivt_addr)
 {
-	asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
+	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+	ia64_srlz_i();
 }
/* Set the page table address and control bits. */
...
...
@@ -595,79 +539,33 @@ static inline void
 ia64_set_pta (__u64 pta)
 {
 	/* Note: srlz.i implies srlz.d */
-	asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
-}
-
-static inline __u64
-ia64_get_cpuid (__u64 regnum)
-{
-	__u64 r;
-
-	asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
-	return r;
+	ia64_setreg(_IA64_REG_CR_PTA, pta);
+	ia64_srlz_i();
 }
 
 static inline void
 ia64_eoi (void)
 {
-	asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
+	ia64_setreg(_IA64_REG_CR_EOI, 0);
+	ia64_srlz_d();
 }
 
+#define cpu_relax()	ia64_hint(ia64_hint_pause)
+
 static inline void
 ia64_set_lrr0 (unsigned long val)
 {
-	asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
-}
-
-static inline void
-ia64_hint_pause (void)
-{
-	asm volatile ("hint @pause" ::: "memory");
+	ia64_setreg(_IA64_REG_CR_LRR0, val);
+	ia64_srlz_d();
 }
 
-#define cpu_relax()	ia64_hint_pause()
-
 static inline void
 ia64_set_lrr1 (unsigned long val)
 {
-	asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory");
-}
-
-static inline void
-ia64_set_pmv (__u64 val)
-{
-	asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
-}
-
-static inline __u64
-ia64_get_pmc (__u64 regnum)
-{
-	__u64 retval;
-
-	asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
-	return retval;
-}
-
-static inline void
-ia64_set_pmc (__u64 regnum, __u64 value)
-{
-	asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
-}
-
-static inline __u64
-ia64_get_pmd (__u64 regnum)
-{
-	__u64 retval;
-
-	asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
-	return retval;
-}
-
-static inline void
-ia64_set_pmd (__u64 regnum, __u64 value)
-{
-	asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
+	ia64_setreg(_IA64_REG_CR_LRR1, val);
+	ia64_srlz_d();
 }
/*
* Given the address to which a spill occurred, return the unat bit
...
...
@@ -713,160 +611,35 @@ thread_saved_pc (struct task_struct *t)
  * Get the current instruction/program counter value.
  */
 #define current_text_addr() \
-	({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; })
-
-/*
- * Set the correctable machine check vector register
- */
-static inline void
-ia64_set_cmcv (__u64 val)
-{
-	asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
-}
-
-/*
- * Read the correctable machine check vector register
- */
-static inline __u64
-ia64_get_cmcv (void)
-{
-	__u64 val;
-
-	asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
-	return val;
-}
+	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
 
 static inline __u64
 ia64_get_ivr (void)
 {
 	__u64 r;
-	asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
-	return r;
-}
-
-static inline void
-ia64_set_tpr (__u64 val)
-{
-	asm volatile ("mov cr.tpr=%0" :: "r"(val));
-}
-
-static inline __u64
-ia64_get_tpr (void)
-{
-	__u64 r;
-	asm volatile ("mov %0=cr.tpr" : "=r"(r));
-	return r;
-}
-
-static inline void
-ia64_set_irr0 (__u64 val)
-{
-	asm volatile ("mov cr.irr0=%0;;" :: "r"(val) : "memory");
-	ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr0 (void)
-{
-	__u64 val;
-
-	/* this is volatile because irr may change unbeknownst to gcc... */
-	asm volatile ("mov %0=cr.irr0" : "=r"(val));
-	return val;
-}
-
-static inline void
-ia64_set_irr1 (__u64 val)
-{
-	asm volatile ("mov cr.irr1=%0;;" :: "r"(val) : "memory");
+	ia64_srlz_d();
+	r = ia64_getreg(_IA64_REG_CR_IVR);
 	ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr1 (void)
-{
-	__u64 val;
-
-	/* this is volatile because irr may change unbeknownst to gcc... */
-	asm volatile ("mov %0=cr.irr1" : "=r"(val));
-	return val;
-}
-
-static inline void
-ia64_set_irr2 (__u64 val)
-{
-	asm volatile ("mov cr.irr2=%0;;" :: "r"(val) : "memory");
-	ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr2 (void)
-{
-	__u64 val;
-
-	/* this is volatile because irr may change unbeknownst to gcc... */
-	asm volatile ("mov %0=cr.irr2" : "=r"(val));
-	return val;
-}
-
-static inline void
-ia64_set_irr3 (__u64 val)
-{
-	asm volatile ("mov cr.irr3=%0;;" :: "r"(val) : "memory");
-	ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr3 (void)
-{
-	__u64 val;
-
-	/* this is volatile because irr may change unbeknownst to gcc... */
-	asm volatile ("mov %0=cr.irr3" : "=r"(val));
-	return val;
-}
-
-static inline __u64
-ia64_get_gp (void)
-{
-	__u64 val;
-
-	asm ("mov %0=gp" : "=r"(val));
-	return val;
-}
-
-static inline void
-ia64_set_ibr (__u64 regnum, __u64 value)
-{
-	asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
+	return r;
 }
 
 static inline void
 ia64_set_dbr (__u64 regnum, __u64 value)
 {
-	asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
+	__ia64_set_dbr(regnum, value);
 #ifdef CONFIG_ITANIUM
-	asm volatile (";; srlz.d");
+	ia64_srlz_d();
 #endif
 }
 
-static inline __u64
-ia64_get_ibr (__u64 regnum)
-{
-	__u64 retval;
-
-	asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum));
-	return retval;
-}
-
 static inline __u64
 ia64_get_dbr (__u64 regnum)
 {
 	__u64 retval;
 
-	asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum));
+	retval = __ia64_get_dbr(regnum);
 #ifdef CONFIG_ITANIUM
-	asm volatile (";; srlz.d");
+	ia64_srlz_d();
 #endif
 	return retval;
 }
...
...
@@ -883,29 +656,13 @@ ia64_get_dbr (__u64 regnum)
 # define ia64_rotr(w,n)							\
 ({									\
 	__u64 result;							\
-	asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n));	\
+	result = ia64_shrp((w), (w), (n));				\
 	result;								\
 })
 #endif
 
 #define ia64_rotl(w,n)	ia64_rotr((w),(64)-(n))
 
-static inline __u64
-ia64_thash (__u64 addr)
-{
-	__u64 result;
-	asm ("thash %0=%1" : "=r"(result) : "r" (addr));
-	return result;
-}
-
-static inline __u64
-ia64_tpa (__u64 addr)
-{
-	__u64 result;
-	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
-	return result;
-}
/*
* Take a mapped kernel address and return the equivalent address
* in the region 7 identity mapped virtual area.
...
...
@@ -914,7 +671,7 @@ static inline void *
 ia64_imva (void *addr)
 {
 	void *result;
-	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
+	result = (void *) ia64_tpa(addr);
 	return __va(result);
 }
...
...
@@ -926,13 +683,13 @@ ia64_imva (void *addr)
 static inline void
 prefetch (const void *x)
 {
-	__asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
+	ia64_lfetch(ia64_lfhint_none, x);
 }
 
 static inline void
 prefetchw (const void *x)
 {
-	__asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
+	ia64_lfetch_excl(ia64_lfhint_none, x);
 }
 
 #define spin_lock_prefetch(x)	prefetchw(x)
...
...
include/asm-ia64/rwsem.h
...
...
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/intrinsics.h>
/*
* the semaphore definition
...
...
@@ -82,8 +83,9 @@ static inline void
 __down_read (struct rw_semaphore *sem)
 {
 	int result;
-	__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" : "=r"(result)
-			      : "r"(&sem->count) : "memory");
+
+	result = ia64_fetchadd4_acq((unsigned int *) &sem->count, 1);
 	if (result < 0)
 		rwsem_down_read_failed(sem);
 }
...
...
@@ -112,8 +114,9 @@ static inline void
 __up_read (struct rw_semaphore *sem)
 {
 	int result;
-	__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" : "=r"(result)
-			      : "r"(&sem->count) : "memory");
+
+	result = ia64_fetchadd4_rel((unsigned int *) &sem->count, -1);
 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
 }
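Both reader paths are a single fetch-and-add on sem->count: +1 with acquire semantics to enter, -1 with release semantics to leave, with the returned old value telling the caller whether a writer was active. A portable sketch of the fast path, using a GCC builtin in place of the ia64 intrinsic (not the kernel's code):

static inline int reader_try_enter (volatile int *count)
{
	/* __sync_fetch_and_add returns the old value, like fetchadd4.acq;
	 * a negative count means a writer holds the semaphore. */
	return __sync_fetch_and_add(count, 1) >= 0;
}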
...
...
include/asm-ia64/sal.h
...
...
@@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
 extern unsigned long sal_platform_features;
 
+struct sal_ret_values {
+	long r8; long r9; long r10; long r11;
+};
+
#endif
/* __ASSEMBLY__ */
#endif
/* _ASM_IA64_PAL_H */
include/asm-ia64/smp.h
...
...
@@ -120,7 +120,7 @@ hard_smp_processor_id (void)
 		unsigned long bits;
 	} lid;
 
-	lid.bits = ia64_get_lid();
+	lid.bits = ia64_getreg(_IA64_REG_CR_LID);
 	return lid.f.id << 8 | lid.f.eid;
 }
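The union picks the id and eid fields out of cr.lid and packs them as (id << 8 | eid). The same decode from the raw register value, assuming the architected layout (id in bits 31:24, eid in bits 23:16); hypothetical helper, not kernel code:

static inline unsigned int lid_to_phys_id (unsigned long lid_bits)
{
	unsigned int eid = (lid_bits >> 16) & 0xff;	/* cr.lid bits 23:16 */
	unsigned int id  = (lid_bits >> 24) & 0xff;	/* cr.lid bits 31:24 */

	return id << 8 | eid;
}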
...
...
include/asm-ia64/sn/sn2/io.h
...
...
@@ -11,11 +11,23 @@
 extern void * sn_io_addr(unsigned long port);	/* Forward definition */
 extern void sn_mmiob(void);			/* Forward definition */
 
-#define __sn_mf_a()	__asm__ __volatile__ ("mf.a" ::: "memory")
+#include <asm/intrinsics.h>
+
+#define __sn_mf_a()	ia64_mfa()
 
 extern void sn_dma_flush(unsigned long);
 
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
/*
* The following routines are SN Platform specific, called when
* a reference is made to inX/outX set macros. SN Platform
...
...
@@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long);
*/
 static inline unsigned int
-__sn_inb (unsigned long port)
+___sn_inb (unsigned long port)
 {
 	volatile unsigned char *addr;
 	unsigned char ret = -1;
...
...
@@ -40,7 +52,7 @@ __sn_inb (unsigned long port)
 }
 
 static inline unsigned int
-__sn_inw (unsigned long port)
+___sn_inw (unsigned long port)
 {
 	volatile unsigned short *addr;
 	unsigned short ret = -1;
...
...
@@ -54,7 +66,7 @@ __sn_inw (unsigned long port)
 }
 
 static inline unsigned int
-__sn_inl (unsigned long port)
+___sn_inl (unsigned long port)
 {
 	volatile unsigned int *addr;
 	unsigned int ret = -1;
...
...
@@ -68,7 +80,7 @@ __sn_inl (unsigned long port)
 }
 
 static inline void
-__sn_outb (unsigned char val, unsigned long port)
+___sn_outb (unsigned char val, unsigned long port)
 {
 	volatile unsigned char *addr;
...
...
@@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port)
 }
 
 static inline void
-__sn_outw (unsigned short val, unsigned long port)
+___sn_outw (unsigned short val, unsigned long port)
 {
 	volatile unsigned short *addr;
...
...
@@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port)
 }
 
 static inline void
-__sn_outl (unsigned int val, unsigned long port)
+___sn_outl (unsigned int val, unsigned long port)
 {
 	volatile unsigned int *addr;
...
...
@@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port)
 */
 static inline unsigned char
-__sn_readb (void *addr)
+___sn_readb (void *addr)
 {
 	unsigned char val;
...
...
@@ -121,7 +133,7 @@ __sn_readb (void *addr)
 }
 
 static inline unsigned short
-__sn_readw (void *addr)
+___sn_readw (void *addr)
 {
 	unsigned short val;
...
...
@@ -132,7 +144,7 @@ __sn_readw (void *addr)
 }
 
 static inline unsigned int
-__sn_readl (void *addr)
+___sn_readl (void *addr)
 {
 	unsigned int val;
...
...
@@ -143,7 +155,7 @@ __sn_readl (void *addr)
 }
 
 static inline unsigned long
-__sn_readq (void *addr)
+___sn_readq (void *addr)
 {
 	unsigned long val;
...
...
include/asm-ia64/sn/sn_cpuid.h
...
...
@@ -89,7 +89,7 @@
 #ifndef CONFIG_SMP
 #define cpu_logical_id(cpu)			0
-#define cpu_physical_id(cpuid)			((ia64_get_lid() >> 16) & 0xffff)
+#define cpu_physical_id(cpuid)			((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
 #endif
/*
...
...
@@ -98,8 +98,8 @@
*/
 #define cpu_physical_id_to_nasid(cpi)		((cpi) & 0xfff)
 #define cpu_physical_id_to_slice(cpi)		((cpi>>12) & 3)
-#define get_nasid()				((ia64_get_lid() >> 16) & 0xfff)
-#define get_slice()				((ia64_get_lid() >> 28) & 0xf)
+#define get_nasid()				((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff)
+#define get_slice()				((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf)
 #define get_node_number(addr)			(((unsigned long)(addr)>>38) & 0x7ff)
/*
...
...
include/asm-ia64/spinlock.h
...
...
@@ -10,10 +10,12 @@
*/
 #include <linux/kernel.h>
 #include <linux/compiler.h>
 
 #include <asm/system.h>
 #include <asm/bitops.h>
 #include <asm/atomic.h>
+#include <asm/intrinsics.h>
 
 typedef struct {
 	volatile unsigned int lock;
...
@@ -102,8 +104,8 @@ typedef struct {
 do {									\
 	rwlock_t *__read_lock_ptr = (rw);				\
 									\
-	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) {	\
-		ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");	\
+	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 		while (*(volatile int *)__read_lock_ptr < 0)		\
 			cpu_relax();					\
 	}								\
...
...
@@ -112,7 +114,7 @@ do { \
 #define _raw_read_unlock(rw)					\
 do {								\
 	rwlock_t *__read_lock_ptr = (rw);			\
-	ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");	\
+	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)
#define _raw_write_lock(rw) \
...
...
include/asm-ia64/system.h
...
...
@@ -55,12 +55,6 @@ extern struct ia64_boot_param {
 	__u64 initrd_size;
 } *ia64_boot_param;
 
-static inline void
-ia64_insn_group_barrier (void)
-{
-	__asm__ __volatile__ (";;" ::: "memory");
-}
-
/*
* Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all
...
...
@@ -83,7 +77,7 @@ ia64_insn_group_barrier (void)
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
-#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
+#define mb()	ia64_mf()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
...
...
@@ -119,22 +113,28 @@ ia64_insn_group_barrier (void)
/* clearing psr.i is implicitly serialized (visible by next insn) */
/* setting psr.i requires data serialization */
#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \
"rsm psr.i;;" \
: "=r" (x) :: "memory")
#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"(p6) srlz.d" \
:: "r" ((x) & IA64_PSR_I) \
: "p6", "p7", "memory")
#define __local_irq_save(x) \
do { \
unsigned long psr; \
psr = ia64_getreg(_IA64_REG_PSR); \
ia64_stop(); \
ia64_rsm(IA64_PSR_I); \
(x) = psr; \
} while (0)
#define __local_irq_disable() \
do { \
ia64_stop(); \
ia64_rsm(IA64_PSR_I); \
} while (0)
#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
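The calling pattern these primitives support is the usual save/disable/restore bracket; since __local_irq_restore() only keys off psr.i, nesting works as long as each level restores the flags it saved. A sketch of the idiom (an illustration of usage, not new kernel code):

static inline void with_irqs_masked (void (*fn)(void *), void *arg)
{
	unsigned long flags;

	__local_irq_save(flags);	/* read psr, then rsm psr.i */
	fn(arg);			/* runs with interrupts masked */
	__local_irq_restore(flags);	/* re-enables only if psr.i was set */
}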
 #ifdef CONFIG_IA64_DEBUG_IRQ
 
   extern unsigned long last_cli_ip;
 
-# define __save_ip()	__asm__ ("mov %0=ip" : "=r" (last_cli_ip))
+# define __save_ip()	last_cli_ip = ia64_getreg(_IA64_REG_IP)
# define local_irq_save(x) \
do { \
...
...
@@ -164,8 +164,8 @@ do { \
# define local_irq_restore(x) __local_irq_restore(x)
#endif
/* !CONFIG_IA64_DEBUG_IRQ */
-#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
-#define local_save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
+#define local_irq_enable()	({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags)	({ (flags) = ia64_getreg(_IA64_REG_PSR); })
#define irqs_disabled() \
({ \
...
...
include/asm-ia64/timex.h
...
...
@@ -11,6 +11,7 @@
*/
#include <asm/processor.h>
#include <asm/intrinsics.h>
typedef
unsigned
long
cycles_t
;
...
...
@@ -32,7 +33,7 @@ get_cycles (void)
 {
 	cycles_t ret;
 
-	__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret));
+	ret = ia64_getreg(_IA64_REG_AR_ITC);
 	return ret;
 }
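get_cycles() is now a plain read of ar.itc, so an elapsed-time measurement is just a subtraction; unsigned arithmetic keeps the result well defined even across counter wraparound. Hypothetical helper:

static inline cycles_t cycles_elapsed (void (*fn)(void))
{
	cycles_t t0 = get_cycles();

	fn();
	return get_cycles() - t0;	/* correct even if ar.itc wraps */
}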
...
...
include/asm-ia64/tlbflush.h
...
...
@@ -12,6 +12,7 @@
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/intrinsics.h>
/*
* Now for some TLB flushing routines. This is the kind of stuff that
...
...
@@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
 #else
 	if (vma->vm_mm == current->active_mm)
-		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+		ia64_ptcl(addr, (PAGE_SHIFT << 2));
 	else
 		vma->vm_mm->context = 0;
 #endif
...
...
include/asm-ia64/unistd.h
...
...
@@ -334,73 +334,18 @@ waitpid (int pid, int * wait_stat, int flags)
 }
 
-static inline int
-execve (const char *filename, char *const av[], char *const ep[])
-{
-	register long r8 asm ("r8");
-	register long r10 asm ("r10");
-	register long r15 asm ("r15") = __NR_execve;
-	register long out0 asm ("out0") = (long) filename;
-	register long out1 asm ("out1") = (long) av;
-	register long out2 asm ("out2") = (long) ep;
-
-	asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
-		      : "=r" (r8), "=r" (r10), "=r" (r15),
-			"=r" (out0), "=r" (out1), "=r" (out2)
-		      : "2" (r15), "3" (out0), "4" (out1), "5" (out2)
-		      : "memory", "out3", "out4", "out5", "out6", "out7",
-			/* Non-stacked integer registers, minus r8, r10, r15, r13 */
-			"r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
-			"r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
-			"r28", "r29", "r30", "r31",
-			/* Predicate registers. */
-			"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
-			/* Non-rotating fp registers. */
-			"f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
-			/* Branch registers. */
-			"b6", "b7");
-	return r8;
-}
-
-static inline pid_t
-clone (unsigned long flags, void *sp)
-{
-	register long r8 asm ("r8");
-	register long r10 asm ("r10");
-	register long r15 asm ("r15") = __NR_clone;
-	register long out0 asm ("out0") = (long) flags;
-	register long out1 asm ("out1") = (long) sp;
-	long retval;
-
-	/* clone clobbers current, hence the "r13" in the clobbers list */
-	asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
-		      : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1)
-		      : "2" (r15), "3" (out0), "4" (out1)
-		      : "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13",
-			/* Non-stacked integer registers, minus r8, r10, r15, r13 */
-			"r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
-			"r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
-			"r28", "r29", "r30", "r31",
-			/* Predicate registers. */
-			"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
-			/* Non-rotating fp registers. */
-			"f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
-			/* Branch registers. */
-			"b6", "b7");
-	retval = r8;
-	return retval;
-}
+extern int execve (const char *filename, char *const av[], char *const ep[]);
+extern pid_t clone (unsigned long flags, void *sp);
#endif
/* __KERNEL_SYSCALLS__ */
/*
* "Conditional" syscalls
*
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on
- * all toolchains, so we just do it by hand.  Note, this macro can only be used in the
+ * Note, this macro can only be used in the
  * file which defines sys_ni_syscall, i.e., in kernel/sys.c.
  */
-#define cond_syscall(x) asm (".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall")));
#endif
/* !__ASSEMBLY__ */
#endif
/* __KERNEL__ */
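The new cond_syscall() leans on the compiler instead of hand-written .weak/.set asm: a weak alias loses to any strong definition of the same symbol. A standalone GCC demonstration of the mechanism (hypothetical names, not kernel code):

long sys_ni_syscall (void) { return -38; }	/* -ENOSYS */

/* Weak alias: if no strong sys_optional() is defined anywhere, calls land
 * in sys_ni_syscall(); a real definition elsewhere silently overrides it. */
long sys_optional (void) __attribute__((weak, alias("sys_ni_syscall")));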