Commit 708df1b0 authored by Suresh B. Siddha, committed by David Mosberger

[PATCH] ia64: cleanup inline assembly

parent cec5d408
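The change repeated file after file below is mechanical: open-coded GCC inline assembly is replaced by the named intrinsics from <asm/intrinsics.h> (ia64_getreg(), ia64_setreg(), ia64_rsm(), ia64_stop(), ...), so that a compiler without GCC-style asm support can map the same calls onto its own builtins. A minimal sketch of the shape involved, assuming ASM_SUPPORTED selects the GCC path; the macro names here are hypothetical stand-ins, not the kernel's exact definitions:

	/* my_get_tpr()/my_set_tpr() stand in for
	 * ia64_getreg(_IA64_REG_CR_TPR) / ia64_setreg(_IA64_REG_CR_TPR, v).
	 */
	#ifdef ASM_SUPPORTED			/* GCC: expand to inline asm */
	# define my_get_tpr()						\
	({								\
		unsigned long __v;					\
		asm volatile ("mov %0=cr.tpr" : "=r"(__v));		\
		__v;							\
	})
	# define my_set_tpr(v)	asm volatile ("mov cr.tpr=%0" :: "r"(v))
	#else				/* e.g. the Intel compiler's builtins */
	# define my_get_tpr()	__getReg(_IA64_REG_CR_TPR)
	# define my_set_tpr(v)	__setReg(_IA64_REG_CR_TPR, (v))
	#endif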
@@ -32,6 +32,6 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 LDFLAGS_bootloader = -static -T
-$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o \
+$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/fw-emu.o \
 	lib/lib.a arch/ia64/lib/lib.a FORCE
 	$(call if_changed,ld)
@@ -21,6 +21,7 @@ struct task_struct;	/* forward declaration for elf.h */
 #include <asm/pgtable.h>
 #include <asm/sal.h>
 #include <asm/system.h>
+#include <asm/intrinsics.h>

 /* Simulator system calls: */
@@ -54,9 +55,9 @@ struct disk_stat {
 };

 #include "../kernel/fw-emu.c"

+extern void jmp_to_kernel(ulong sp, ulong bp, ulong e_entry);
+extern void __bsw1(void);
-/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
-asm (".global printk; printk = 0");

 /*
  * Set a break point on this function so that symbols are available to set breakpoints in
@@ -98,9 +99,12 @@ _start (void)
 	char *kpath, *args;
 	long arglen = 0;

-	asm volatile ("movl gp=__gp;;" ::: "memory");
-	asm volatile ("mov sp=%0" :: "r"(stack) : "memory");
-	asm volatile ("bsw.1;;");
+	extern __u64 __gp;
+	register unsigned long tmp = (unsigned long) &stack[0];
+
+	ia64_setreg(_IA64_REG_GP, __gp);
+	ia64_setreg(_IA64_REG_SP, tmp);
+	__bsw1();

 	ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
@@ -195,15 +199,15 @@ _start (void)
 	cons_write("starting kernel...\n");

 	/* fake an I/O base address: */
-	asm volatile ("mov ar.k0=%0" :: "r"(0xffffc000000UL));
+	ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL);

 	bp = sys_fw_init(args, arglen);

 	ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS);
 	debug_break();
-	asm volatile ("mov sp=%2; mov r28=%1; br.sptk.few %0"
-		      :: "b"(e_entry), "r"(bp), "r"(__pa(&stack)));
+	tmp = __pa(&stack);
+	jmp_to_kernel(tmp, (unsigned long) bp, e_entry);

 	cons_write("kernel returned!\n");
 	ssc(-1, 0, 0, 0, SSC_EXIT);
...
+#include <asm/asmmacro.h>
+
+GLOBAL_ENTRY(ssc)
+	.regstk 5,0,0,0
+	mov r15=in4
+	break 0x80001
+	br.ret.sptk.many b0
+END(ssc)
+
+GLOBAL_ENTRY(pal_emulator_static)
+	mov r8=-1
+	mov r9=256
+	;;
+	cmp.gtu p6,p7=r9,r28		/* r28 <= 255? */
+(p6)	br.cond.sptk.few static
+	;;
+	mov r9=512
+	;;
+	cmp.gtu p6,p7=r9,r28
+(p6)	br.cond.sptk.few stacked
+	;;
+static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
+(p7)	br.cond.sptk.few 1f
+	;;
+	mov r8=0			/* status = 0 */
+	movl r9=0x100000000		/* tc.base */
+	movl r10=0x0000000200000003	/* count[0], count[1] */
+	movl r11=0x1000000000002000	/* stride[0], stride[1] */
+	br.cond.sptk.few rp
+1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	movl r9 =0x100000064		/* proc_ratio (1/100) */
+	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
+	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */
+	;;
+1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	mov r9=96			/* num phys stacked */
+	mov r10=0			/* hints */
+	mov r11=0
+	br.cond.sptk.few rp
+1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */
+(p7)	br.cond.sptk.few 1f
+	mov r9=ar.lc
+	movl r8=524288			/* flush 512k million cache lines (16MB) */
+	;;
+	mov ar.lc=r8
+	movl r8=0xe000000000000000
+	;;
+.loop:	fc r8
+	add r8=32,r8
+	br.cloop.sptk.few .loop
+	sync.i
+	;;
+	srlz.i
+	;;
+	mov ar.lc=r9
+	mov r8=r0
+	;;
+1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	movl r9 =0x12082004		/* generic=4 width=32 retired=8 cycles=18 */
+	mov r10=0			/* reserved */
+	mov r11=0			/* reserved */
+	mov r16=0xffff			/* implemented PMC */
+	mov r17=0xffff			/* implemented PMD */
+	add r18=8,r29			/* second index */
+	;;
+	st8 [r29]=r16,16		/* store implemented PMC */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+	st8 [r29]=r0,16			/* store implemented PMC */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+	st8 [r29]=r17,16		/* store implemented PMD */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	mov r16=0xf0			/* cycles count capable PMC */
+	;;
+	st8 [r29]=r0,16			/* store implemented PMC */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	mov r17=0x10			/* retired bundles capable PMC */
+	;;
+	st8 [r29]=r16,16		/* store cycles capable */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+	st8 [r29]=r0,16			/* store implemented PMC */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+	st8 [r29]=r17,16		/* store retired bundle capable */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+	st8 [r29]=r0,16			/* store implemented PMC */
+	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+1:	br.cond.sptk.few rp
+stacked:
+	br.ret.sptk.few rp
+END(pal_emulator_static)
+
+GLOBAL_ENTRY(jmp_to_kernel)
+	.regstk 3,0,0,0
+	mov sp=in0
+	mov r28=in1
+	mov b7=in2
+	br.sptk.few b7
+END(jmp_to_kernel)
+
+GLOBAL_ENTRY(__bsw1)
+	bsw.1
+	;;
+	br.ret.sptk.many b0
+END(__bsw1)
+
+/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
+	.global printk; printk = 0
@@ -7,7 +7,7 @@
 # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
 #

-obj-y := hpsim_irq.o hpsim_setup.o
+obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
 obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
 obj-$(CONFIG_HP_SIMETH) += simeth.o
...
+#include <asm/asmmacro.h>
+
+/*
+ * Simulator system call.
+ */
+GLOBAL_ENTRY(ia64_ssc)
+	mov r15=r36
+	break 0x80001
+	br.ret.sptk.many rp
+END(ia64_ssc)
@@ -25,19 +25,6 @@
 #include "hpsim_ssc.h"

-/*
- * Simulator system call.
- */
-asm (".text\n"
-     ".align 32\n"
-     ".global ia64_ssc\n"
-     ".proc ia64_ssc\n"
-     "ia64_ssc:\n"
-     "mov r15=r36\n"
-     "break 0x80001\n"
-     "br.ret.sptk.many rp\n"
-     ".endp\n");
-
 void
 ia64_ssc_connect_irq (long intr, long irq)
 {
...
@@ -41,6 +41,8 @@
 #define __IA32_NR_sigreturn 119
 #define __IA32_NR_rt_sigreturn 173

+#include <asm/intrinsics.h>
+#ifdef ASM_SUPPORTED
 register double f16 asm ("f16"); register double f17 asm ("f17");
 register double f18 asm ("f18"); register double f19 asm ("f19");
 register double f20 asm ("f20"); register double f21 asm ("f21");
@@ -50,6 +52,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26"); register double f27 asm ("f27");
 register double f28 asm ("f28"); register double f29 asm ("f29");
 register double f30 asm ("f30"); register double f31 asm ("f31");
+#endif

 struct sigframe_ia32
 {
@@ -198,30 +201,6 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
  * All other fields unused...
  */

-#define __ldfe(regnum, x)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("ldfe %0=[%1] ;;" :"=f"(__f__): "r"(x));	\
-})
-
-#define __ldf8(regnum, x)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("ldf8 %0=[%1] ;;" :"=f"(__f__): "r"(x));	\
-})
-
-#define __stfe(x, regnum)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
-})
-
-#define __stf8(x, regnum)						\
-({									\
-	register double __f__ asm ("f"#regnum);				\
-	__asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
-})
-
 static int
 save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 {
@@ -239,17 +218,18 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 		return -EFAULT;

 	/* Readin fsr, fcr, fir, fdr and copy onto fpstate */
-	asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
-	asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
-	asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
-	asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+	fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	fir = ia64_getreg(_IA64_REG_AR_FIR);
+	fdr = ia64_getreg(_IA64_REG_AR_FDR);
 	/*
 	 * We need to clear the exception state before calling the signal handler. Clear
 	 * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex
 	 * instruction.
 	 */
 	new_fsr = fsr & ~0x80ff;
-	asm volatile ( "mov ar.fsr=%0;" :: "r"(new_fsr));
+	ia64_setreg(_IA64_REG_AR_FSR, new_fsr);

 	__put_user(fcr & 0xffff, &save->cw);
 	__put_user(fsr & 0xffff, &save->sw);
@@ -286,45 +266,45 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	ia64f2ia32f(fpregp, &ptp->f11);
 	copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));

-	__stfe(fpregp, 12);
+	ia64_stfe(fpregp, 12);
 	copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-	__stfe(fpregp, 13);
+	ia64_stfe(fpregp, 13);
 	copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-	__stfe(fpregp, 14);
+	ia64_stfe(fpregp, 14);
 	copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-	__stfe(fpregp, 15);
+	ia64_stfe(fpregp, 15);
 	copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));

-	__stf8(&num128[0], 16);
-	__stf8(&num128[1], 17);
+	ia64_stf8(&num128[0], 16);
+	ia64_stf8(&num128[1], 17);
 	copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 18);
-	__stf8(&num128[1], 19);
+	ia64_stf8(&num128[0], 18);
+	ia64_stf8(&num128[1], 19);
 	copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 20);
-	__stf8(&num128[1], 21);
+	ia64_stf8(&num128[0], 20);
+	ia64_stf8(&num128[1], 21);
 	copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 22);
-	__stf8(&num128[1], 23);
+	ia64_stf8(&num128[0], 22);
+	ia64_stf8(&num128[1], 23);
 	copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 24);
-	__stf8(&num128[1], 25);
+	ia64_stf8(&num128[0], 24);
+	ia64_stf8(&num128[1], 25);
 	copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 26);
-	__stf8(&num128[1], 27);
+	ia64_stf8(&num128[0], 26);
+	ia64_stf8(&num128[1], 27);
 	copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 28);
-	__stf8(&num128[1], 29);
+	ia64_stf8(&num128[0], 28);
+	ia64_stf8(&num128[1], 29);
 	copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));

-	__stf8(&num128[0], 30);
-	__stf8(&num128[1], 31);
+	ia64_stf8(&num128[0], 30);
+	ia64_stf8(&num128[1], 31);
 	copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));

 	return 0;
 }
@@ -354,10 +334,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	 * should remain same while writing.
 	 * So, we do a read, change specific fields and write.
 	 */
-	asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
-	asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
-	asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
-	asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+	fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	fir = ia64_getreg(_IA64_REG_AR_FIR);
+	fdr = ia64_getreg(_IA64_REG_AR_FDR);

 	__get_user(mxcsr, (unsigned int *)&save->mxcsr);
 	/* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */
@@ -391,10 +371,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	num64 = (num64 << 32) | lo;
 	fdr = (fdr & (~0xffffffffffff)) | num64;

-	asm volatile ( "mov ar.fsr=%0;" :: "r"(fsr));
-	asm volatile ( "mov ar.fcr=%0;" :: "r"(fcr));
-	asm volatile ( "mov ar.fir=%0;" :: "r"(fir));
-	asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
+	ia64_setreg(_IA64_REG_AR_FSR, fsr);
+	ia64_setreg(_IA64_REG_AR_FCR, fcr);
+	ia64_setreg(_IA64_REG_AR_FIR, fir);
+	ia64_setreg(_IA64_REG_AR_FDR, fdr);

 	/*
 	 * restore f8..f11 onto pt_regs
@@ -420,45 +400,45 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	ia32f2ia64f(&ptp->f11, fpregp);

 	copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(12, fpregp);
+	ia64_ldfe(12, fpregp);
 	copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(13, fpregp);
+	ia64_ldfe(13, fpregp);
 	copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(14, fpregp);
+	ia64_ldfe(14, fpregp);
 	copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-	__ldfe(15, fpregp);
+	ia64_ldfe(15, fpregp);

 	copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
-	__ldf8(16, &num128[0]);
-	__ldf8(17, &num128[1]);
+	ia64_ldf8(16, &num128[0]);
+	ia64_ldf8(17, &num128[1]);

 	copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
-	__ldf8(18, &num128[0]);
-	__ldf8(19, &num128[1]);
+	ia64_ldf8(18, &num128[0]);
+	ia64_ldf8(19, &num128[1]);

 	copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
-	__ldf8(20, &num128[0]);
-	__ldf8(21, &num128[1]);
+	ia64_ldf8(20, &num128[0]);
+	ia64_ldf8(21, &num128[1]);

 	copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
-	__ldf8(22, &num128[0]);
-	__ldf8(23, &num128[1]);
+	ia64_ldf8(22, &num128[0]);
+	ia64_ldf8(23, &num128[1]);

 	copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
-	__ldf8(24, &num128[0]);
-	__ldf8(25, &num128[1]);
+	ia64_ldf8(24, &num128[0]);
+	ia64_ldf8(25, &num128[1]);

 	copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
-	__ldf8(26, &num128[0]);
-	__ldf8(27, &num128[1]);
+	ia64_ldf8(26, &num128[0]);
+	ia64_ldf8(27, &num128[1]);

 	copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
-	__ldf8(28, &num128[0]);
-	__ldf8(29, &num128[1]);
+	ia64_ldf8(28, &num128[0]);
+	ia64_ldf8(29, &num128[1]);

 	copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
-	__ldf8(30, &num128[0]);
-	__ldf8(31, &num128[1]);
+	ia64_ldf8(30, &num128[0]);
+	ia64_ldf8(31, &num128[1]);

 	return 0;
 }
@@ -705,7 +685,7 @@ setup_sigcontext_ia32 (struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate
 	/*
 	 * `eflags' is in an ar register for this context
 	 */
-	asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+	flag = ia64_getreg(_IA64_REG_AR_EFLAG);
 	err |= __put_user((unsigned int)flag, &sc->eflags);
 	err |= __put_user(regs->r12, &sc->esp_at_signal);
 	err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);
@@ -790,10 +770,10 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
 	 * IA32 process's context.
 	 */
 	err |= __get_user(tmpflags, &sc->eflags);
-	asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+	flag = ia64_getreg(_IA64_REG_AR_EFLAG);
 	flag &= ~0x40DD5;
 	flag |= (tmpflags & 0x40DD5);
-	asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
+	ia64_setreg(_IA64_REG_AR_EFLAG, flag);
 	regs->r1 = -1;	/* disable syscall checks, r1 is orig_eax */
 }
...
@@ -22,6 +22,7 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
+#include <asm/intrinsics.h>

 #include "ia32priv.h"
@@ -68,19 +69,11 @@ ia32_load_segment_descriptors (struct task_struct *task)
 void
 ia32_save_state (struct task_struct *t)
 {
-	unsigned long eflag, fsr, fcr, fir, fdr;
-
-	asm ("mov %0=ar.eflag;"
-	     "mov %1=ar.fsr;"
-	     "mov %2=ar.fcr;"
-	     "mov %3=ar.fir;"
-	     "mov %4=ar.fdr;"
-	     : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
-	t->thread.eflag = eflag;
-	t->thread.fsr = fsr;
-	t->thread.fcr = fcr;
-	t->thread.fir = fir;
-	t->thread.fdr = fdr;
+	t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
+	t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
+	t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
 	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
 	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
 }
@@ -99,12 +92,11 @@ ia32_load_state (struct task_struct *t)
 	fdr = t->thread.fdr;
 	tssd = load_desc(_TSS(nr));	/* TSSD */

-	asm volatile ("mov ar.eflag=%0;"
-		      "mov ar.fsr=%1;"
-		      "mov ar.fcr=%2;"
-		      "mov ar.fir=%3;"
-		      "mov ar.fdr=%4;"
-		      :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
+	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
+	ia64_setreg(_IA64_REG_AR_FSR, fsr);
+	ia64_setreg(_IA64_REG_AR_FCR, fcr);
+	ia64_setreg(_IA64_REG_AR_FIR, fir);
+	ia64_setreg(_IA64_REG_AR_FDR, fdr);
 	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
 	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
 	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
@@ -178,7 +170,7 @@ void
 ia32_cpu_init (void)
 {
 	/* initialize global ia32 state - CR0 and CR4 */
-	asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+	ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
 }

 static int __init
...
@@ -15,6 +15,7 @@
 #include "ia32priv.h"
 #include <asm/ptrace.h>
+#include <asm/intrinsics.h>

 int
 ia32_intercept (struct pt_regs *regs, unsigned long isr)
@@ -93,9 +94,8 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
 	{
 		unsigned long fsr, fcr;

-		asm ("mov %0=ar.fsr;"
-		     "mov %1=ar.fcr;"
-		     : "=r"(fsr), "=r"(fcr));
+		fsr = ia64_getreg(_IA64_REG_AR_FSR);
+		fcr = ia64_getreg(_IA64_REG_AR_FCR);

 		siginfo.si_signo = SIGFPE;
 		/*
...
@@ -445,17 +445,19 @@ extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
 extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
 extern void ia32_load_segment_descriptors (struct task_struct *task);

 #define ia32f2ia64f(dst,src)		\
 	do {				\
-		register double f6 asm ("f6");	\
-		asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory");	\
+		ia64_ldfe(6,src);	\
+		ia64_stop();		\
+		ia64_stf_spill(dst, 6);	\
 	} while(0)

 #define ia64f2ia32f(dst,src)		\
 	do {				\
-		register double f6 asm ("f6");	\
-		asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory");	\
+		ia64_ldf_fill(6, src);	\
+		ia64_stop();		\
+		ia64_stfe(dst, 6);	\
 	} while(0)

 struct user_regs_struct32 {
 	__u32 ebx, ecx, edx, esi, edi, ebp, eax;
...
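In the removed macros the ';;' inside the asm string supplied the stop bit that separated the dependent load and store; with one intrinsic per instruction, that instruction-group boundary has to be requested explicitly, which is what the ia64_stop() between ia64_ldfe()/ia64_ldf_fill() and the following store is for. A sketch of what such a stop-bit intrinsic can expand to under ASM_SUPPORTED (illustrative; the kernel's real definition may differ):

	/* Hypothetical expansion: emit a ';;' so the stf.* that reads f6
	 * lands in a later instruction group than the ldf* that writes it. */
	#define my_stop()	asm volatile (";;" ::: "memory")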
@@ -54,6 +54,7 @@
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
+#include <asm/intrinsics.h>

 #include "ia32priv.h"
@@ -2192,7 +2193,7 @@ sys32_iopl (int level)
 	if (level != 3)
 		return(-EINVAL);
 	/* Trying to gain more privileges? */
-	asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
+	old = ia64_getreg(_IA64_REG_AR_EFLAG);
 	if ((unsigned int) level > ((old >> 12) & 3)) {
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
@@ -2216,7 +2217,7 @@ sys32_iopl (int level)
 	if (addr >= 0) {
 		old = (old & ~0x3000) | (level << 12);
-		asm volatile ("mov ar.eflag=%0;;" :: "r"(old));
+		ia64_setreg(_IA64_REG_AR_EFLAG, old);
 	}
 	fput(file);
...
@@ -471,6 +471,18 @@ GLOBAL_ENTRY(__ia64_syscall)
 	br.ret.sptk.many rp
 END(__ia64_syscall)

+GLOBAL_ENTRY(execve)
+	mov r15=__NR_execve			// put syscall number in place
+	break __BREAK_SYSCALL
+	br.ret.sptk.many rp
+END(execve)
+
+GLOBAL_ENTRY(clone)
+	mov r15=__NR_clone			// put syscall number in place
+	break __BREAK_SYSCALL
+	br.ret.sptk.many rp
+END(clone)
+
 	/*
 	 * We invoke syscall_trace through this intermediate function to
 	 * ensure that the syscall input arguments are not clobbered.  We
...
@@ -46,17 +46,7 @@ static char fw_mem[( sizeof(struct ia64_boot_param)
 /*
  * Simulator system call.
  */
-static long
-ssc (long arg0, long arg1, long arg2, long arg3, int nr)
-{
-	register long r8 asm ("r8");
-
-	asm volatile ("mov r15=%1\n\t"
-		      "break 0x80001"
-		      : "=r"(r8)
-		      : "r"(nr), "r"(arg0), "r"(arg1), "r"(arg2), "r"(arg3));
-	return r8;
-}
+extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);

 #define SECS_PER_HOUR	(60 * 60)
 #define SECS_PER_DAY	(SECS_PER_HOUR * 24)
@@ -127,101 +117,6 @@ offtime (unsigned long t, efi_time_t *tp)
  */
 extern void pal_emulator_static (void);

-asm (
-"	.proc pal_emulator_static\n"
-"pal_emulator_static:"
-"	mov r8=-1\n"
-"	mov r9=256\n"
-"	;;\n"
-"	cmp.gtu p6,p7=r9,r28		/* r28 <= 255? */\n"
-"(p6)	br.cond.sptk.few static\n"
-"	;;\n"
-"	mov r9=512\n"
-"	;;\n"
-"	cmp.gtu p6,p7=r9,r28\n"
-"(p6)	br.cond.sptk.few stacked\n"
-"	;;\n"
-"static: cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	;;\n"
-"	mov r8=0			/* status = 0 */\n"
-"	movl r9=0x100000000		/* tc.base */\n"
-"	movl r10=0x0000000200000003	/* count[0], count[1] */\n"
-"	movl r11=0x1000000000002000	/* stride[0], stride[1] */\n"
-"	br.cond.sptk.few rp\n"
-"1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r8=0			/* status = 0 */\n"
-"	movl r9 =0x100000064		/* proc_ratio (1/100) */\n"
-"	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */\n"
-"	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */\n"
-"	;;\n"
-"1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r8=0			/* status = 0 */\n"
-"	mov r9=96			/* num phys stacked */\n"
-"	mov r10=0			/* hints */\n"
-"	mov r11=0\n"
-"	br.cond.sptk.few rp\n"
-"1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r9=ar.lc\n"
-"	movl r8=524288			/* flush 512k million cache lines (16MB) */\n"
-"	;;\n"
-"	mov ar.lc=r8\n"
-"	movl r8=0xe000000000000000\n"
-"	;;\n"
-".loop:	fc r8\n"
-"	add r8=32,r8\n"
-"	br.cloop.sptk.few .loop\n"
-"	sync.i\n"
-"	;;\n"
-"	srlz.i\n"
-"	;;\n"
-"	mov ar.lc=r9\n"
-"	mov r8=r0\n"
-"	;;\n"
-"1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */\n"
-"(p7)	br.cond.sptk.few 1f\n"
-"	mov r8=0			/* status = 0 */\n"
-"	movl r9 =0x12082004		/* generic=4 width=32 retired=8 cycles=18 */\n"
-"	mov r10=0			/* reserved */\n"
-"	mov r11=0			/* reserved */\n"
-"	mov r16=0xffff			/* implemented PMC */\n"
-"	mov r17=0xffff			/* implemented PMD */\n"
-"	add r18=8,r29			/* second index */\n"
-"	;;\n"
-"	st8 [r29]=r16,16		/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r17,16		/* store implemented PMD */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	mov r16=0xf0			/* cycles count capable PMC */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	mov r17=0x10			/* retired bundles capable PMC */\n"
-"	;;\n"
-"	st8 [r29]=r16,16		/* store cycles capable */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r17,16		/* store retired bundle capable */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"	st8 [r29]=r0,16			/* store implemented PMC */\n"
-"	st8 [r18]=r0,16			/* clear remaining bits */\n"
-"	;;\n"
-"1:	br.cond.sptk.few rp\n"
-"stacked:\n"
-"	br.ret.sptk.few rp\n"
-"	.endp pal_emulator_static\n");
-
 /* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */

 #define BUILD_CMD(addr)		((0x80000000 | (addr)) & ~3)
@@ -268,14 +163,14 @@ efi_unimplemented (void)
 	return EFI_UNSUPPORTED;
 }

-static long
+static struct sal_ret_values
 sal_emulator (long index, unsigned long in1, unsigned long in2,
 	      unsigned long in3, unsigned long in4, unsigned long in5,
 	      unsigned long in6, unsigned long in7)
 {
-	register long r9 asm ("r9") = 0;
-	register long r10 asm ("r10") = 0;
-	register long r11 asm ("r11") = 0;
+	long r9 = 0;
+	long r10 = 0;
+	long r11 = 0;
 	long status;

 	/*
@@ -357,8 +252,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
 	} else {
 		status = -1;
 	}

-	asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
-	return status;
+	return ((struct sal_ret_values) {status, r9, r10, r11});
 }
...
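Returning a struct rather than pinning register variables works because the ia64 software conventions return small aggregates in r8-r11, which is exactly where SAL callers expect the four result values; the old `register long r9 asm ("r9")` plus empty asm trick forced the same placement by hand. A sketch of the shape such a return type would have (field names here are an assumption; the real definition belongs to the SAL emulation headers):

	/* Sketch: a four-register SAL return.  The ia64 ABI returns this
	 * aggregate in r8..r11, matching the SAL calling convention. */
	struct sal_ret_values {
		long r8;	/* status */
		long r9;
		long r10;
		long r11;
	};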
@@ -39,4 +39,4 @@ static union {
 	.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
 }};

-asm (".global init_task; init_task = init_task_mem");
+extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
@@ -497,7 +497,7 @@ iosapic_register_intr (unsigned int gsi,
 		       unsigned long polarity, unsigned long trigger)
 {
 	int vector;
-	unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+	unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;

 	vector = gsi_to_vector(gsi);
 	if (vector < 0)
@@ -574,7 +574,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 			  unsigned long trigger)
 {
 	int vector;
-	unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+	unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;

 	vector = isa_irq_to_vector(isa_irq);
@@ -668,11 +668,11 @@ iosapic_enable_intr (unsigned int vector)
 		 * Direct the interrupt vector to the current cpu, platform redirection
 		 * will distribute them.
 		 */
-		dest = (ia64_get_lid() >> 16) & 0xffff;
+		dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 	}
 #else
 	/* direct the interrupt vector to the running cpu id */
-	dest = (ia64_get_lid() >> 16) & 0xffff;
+	dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 #endif

 	set_rte(vector, dest);
...
@@ -35,6 +35,7 @@
 #include <asm/machvec.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/intrinsics.h>

 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
@@ -93,8 +94,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	 * because the register and the memory stack are not
 	 * switched atomically.
 	 */
-	asm ("mov %0=ar.bsp" : "=r"(bsp));
-	asm ("mov %0=sp" : "=r"(sp));
+	bsp = ia64_getreg(_IA64_REG_AR_BSP);
+	sp = ia64_getreg(_IA64_REG_SP);

 	if ((sp - bsp) < 1024) {
 		static unsigned char count;
@@ -117,11 +118,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	 * 16 (without this, it would be ~240, which could easily lead
 	 * to kernel stack overflows).
 	 */
-	saved_tpr = ia64_get_tpr();
+	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 		if (!IS_RESCHEDULE(vector)) {
-			ia64_set_tpr(vector);
+			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();

 			do_IRQ(local_vector_to_irq(vector), regs);
@@ -130,7 +131,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			 * Disable interrupts and send EOI:
 			 */
 			local_irq_disable();
-			ia64_set_tpr(saved_tpr);
+			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
 		}
 		ia64_eoi();
 		vector = ia64_get_ivr();
@@ -193,7 +194,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
 #ifdef CONFIG_SMP
 	phys_cpu_id = cpu_physical_id(cpu);
 #else
-	phys_cpu_id = (ia64_get_lid() >> 16) & 0xffff;
+	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 #endif

 	/*
...
@@ -505,14 +505,14 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv.cmcv_regval	= 0;
 	cmcv.cmcv_mask		= 0;	/* Unmask/enable interrupt */
 	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
-	ia64_set_cmcv(cmcv.cmcv_regval);
+	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

 	IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected "
 		       "machine check vector %#x setup and enabled.\n",
 		       smp_processor_id(), IA64_CMC_VECTOR);

 	IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n",
-		       smp_processor_id(), ia64_get_cmcv());
+		       smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }

 /*
@@ -532,10 +532,10 @@ ia64_mca_cmc_vector_disable (void *dummy)
 {
 	cmcv_reg_t	cmcv;

-	cmcv = (cmcv_reg_t)ia64_get_cmcv();
+	cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);

 	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
-	ia64_set_cmcv(cmcv.cmcv_regval);
+	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

 	IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
 		       "machine check vector %#x disabled.\n",
@@ -559,10 +559,10 @@ ia64_mca_cmc_vector_enable (void *dummy)
 {
 	cmcv_reg_t	cmcv;

-	cmcv = (cmcv_reg_t)ia64_get_cmcv();
+	cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);

 	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
-	ia64_set_cmcv(cmcv.cmcv_regval);
+	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

 	IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
 		       "machine check vector %#x enabled.\n",
@@ -727,10 +727,10 @@ ia64_mca_init(void)
 	/* Register the os init handler with SAL */
 	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
 				       ia64_mc_info.imi_monarch_init_handler,
-				       ia64_tpa(ia64_get_gp()),
+				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
 				       ia64_mc_info.imi_monarch_init_handler_size,
 				       ia64_mc_info.imi_slave_init_handler,
-				       ia64_tpa(ia64_get_gp()),
+				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
 				       ia64_mc_info.imi_slave_init_handler_size)))
 	{
 		printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
@@ -816,16 +816,16 @@ ia64_mca_wakeup_ipi_wait(void)
 	do {
 		switch(irr_num) {
 		      case 0:
-			irr = ia64_get_irr0();
+			irr = ia64_getreg(_IA64_REG_CR_IRR0);
			break;
 		      case 1:
-			irr = ia64_get_irr1();
+			irr = ia64_getreg(_IA64_REG_CR_IRR1);
			break;
 		      case 2:
-			irr = ia64_get_irr2();
+			irr = ia64_getreg(_IA64_REG_CR_IRR2);
			break;
 		      case 3:
-			irr = ia64_get_irr3();
+			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			break;
 		}
 	} while (!(irr & (1 << irr_bit))) ;
...
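For reference, the four-way switch above picks one of the 64-bit pending-interrupt registers cr.irr0-cr.irr3; the enclosing code derives the register/bit pair from the vector number roughly as follows (a sketch consistent with the switches in this diff, not a quote of the surrounding kernel code):

	/* 256 interrupt vectors spread across four 64-bit IRR registers. */
	int irr_num = vector / 64;	/* which of cr.irr0..cr.irr3 */
	int irr_bit = vector % 64;	/* bit position inside it */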
@@ -46,6 +46,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
+#include <asm/intrinsics.h>

 #ifdef CONFIG_PERFMON
 /*
@@ -679,39 +680,45 @@ static int pfm_end_notify_user(pfm_context_t *ctx);
 static inline void
 pfm_clear_psr_pp(void)
 {
-	__asm__ __volatile__ ("rsm psr.pp;; srlz.i;;"::: "memory");
+	ia64_rsm(IA64_PSR_PP);
+	ia64_srlz_i();
 }

 static inline void
 pfm_set_psr_pp(void)
 {
-	__asm__ __volatile__ ("ssm psr.pp;; srlz.i;;"::: "memory");
+	ia64_ssm(IA64_PSR_PP);
+	ia64_srlz_i();
 }

 static inline void
 pfm_clear_psr_up(void)
 {
-	__asm__ __volatile__ ("rsm psr.up;; srlz.i;;"::: "memory");
+	ia64_rsm(IA64_PSR_UP);
+	ia64_srlz_i();
 }

 static inline void
 pfm_set_psr_up(void)
 {
-	__asm__ __volatile__ ("ssm psr.up;; srlz.i;;"::: "memory");
+	ia64_ssm(IA64_PSR_UP);
+	ia64_srlz_i();
 }

 static inline unsigned long
 pfm_get_psr(void)
 {
 	unsigned long tmp;
-	__asm__ __volatile__ ("mov %0=psr;;": "=r"(tmp) :: "memory");
+	tmp = ia64_getreg(_IA64_REG_PSR);
+	ia64_srlz_i();
 	return tmp;
 }

 static inline void
 pfm_set_psr_l(unsigned long val)
 {
-	__asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;"::"r"(val): "memory");
+	ia64_setreg(_IA64_REG_PSR_L, val);
+	ia64_srlz_i();
 }

 static inline void
@@ -978,7 +985,8 @@ pfm_restore_monitoring(struct task_struct *task)
 	 */
 	if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
 		/* disable dcr pp */
-		ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR,
+			    ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
 		pfm_clear_psr_pp();
 	} else {
 		pfm_clear_psr_up();
@@ -1025,7 +1033,8 @@ pfm_restore_monitoring(struct task_struct *task)
 	 */
 	if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
 		/* enable dcr pp */
-		ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR,
+			    ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
 		ia64_srlz_i();
 	}
 	pfm_set_psr_l(psr);
@@ -1781,7 +1790,8 @@ pfm_syswide_force_stop(void *info)
 	/*
 	 * Update local PMU
 	 */
-	ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+	ia64_setreg(_IA64_REG_CR_DCR,
+		    ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
 	ia64_srlz_i();
 	/*
 	 * update local cpuinfo
@@ -3952,7 +3962,8 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 *
 	 * disable dcr pp
 	 */
-	ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+	ia64_setreg(_IA64_REG_CR_DCR,
+		    ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
 	ia64_srlz_i();

 	/*
@@ -4042,7 +4053,8 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	pfm_set_psr_pp();

 	/* enable dcr pp */
-	ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);
+	ia64_setreg(_IA64_REG_CR_DCR,
+		    ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
 	ia64_srlz_i();

 	return 0;
@@ -4207,7 +4219,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		current->pid,
 		thread->pfm_context, ctx));

-	old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
+	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
 	if (old != NULL) {
 		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
 		goto error_unres;
@@ -5467,13 +5479,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
 	 * if monitoring has started
 	 */
 	if (dcr_pp) {
-		dcr = ia64_get_dcr();
+		dcr = ia64_getreg(_IA64_REG_CR_DCR);
 		/*
 		 * context switching in?
 		 */
 		if (is_ctxswin) {
 			/* mask monitoring for the idle task */
-			ia64_set_dcr(dcr & ~IA64_DCR_PP);
+			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
 			pfm_clear_psr_pp();
 			ia64_srlz_i();
 			return;
@@ -5485,7 +5497,7 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
 		 * Due to inlining this odd if-then-else construction generates
 		 * better code.
 		 */
-		ia64_set_dcr(dcr |IA64_DCR_PP);
+		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
 		pfm_set_psr_pp();
 		ia64_srlz_i();
 	}
@@ -6265,7 +6277,7 @@ pfm_init_percpu (void)
 	if (smp_processor_id() == 0)
 		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);

-	ia64_set_pmv(IA64_PERFMON_VECTOR);
+	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
 	ia64_srlz_d();
 	/*
...
@@ -741,8 +741,8 @@ cpu_init (void)
 	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
 	 * be fine).
 	 */
-	ia64_set_dcr(  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
-		     | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
+	ia64_setreg(_IA64_REG_CR_DCR, IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX
+		    | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 	if (current->mm)
@@ -758,11 +758,11 @@ cpu_init (void)
 	ia64_set_itv(1 << 16);
 	ia64_set_lrr0(1 << 16);
 	ia64_set_lrr1(1 << 16);
-	ia64_set_pmv(1 << 16);
-	ia64_set_cmcv(1 << 16);
+	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

 	/* clear TPR & XTP to enable all interrupt classes: */
-	ia64_set_tpr(0);
+	ia64_setreg(_IA64_REG_CR_TPR, 0);
 #ifdef CONFIG_SMP
 	normal_xtp();
 #endif
...
@@ -41,6 +41,8 @@
 # define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])
 #endif

+#include <asm/intrinsics.h>
+#ifdef ASM_SUPPORTED
 register double f16 asm ("f16"); register double f17 asm ("f17");
 register double f18 asm ("f18"); register double f19 asm ("f19");
 register double f20 asm ("f20"); register double f21 asm ("f21");
@@ -50,6 +52,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26"); register double f27 asm ("f27");
 register double f28 asm ("f28"); register double f29 asm ("f29");
 register double f30 asm ("f30"); register double f31 asm ("f31");
+#endif

 long
 ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
@@ -192,7 +195,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
 	      case __SI_TIMER >> 16:
 		err |= __put_user(from->si_tid, &to->si_tid);
 		err |= __put_user(from->si_overrun, &to->si_overrun);
-		err |= __put_user(from->si_value, &to->si_value);
+		err |= __put_user(from->si_value.sival_ptr, &to->si_value.sival_ptr);
 		break;
 	      case __SI_CHLD >> 16:
 		err |= __put_user(from->si_utime, &to->si_utime);
...
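The si_value change works because __put_user() moves a scalar, not a whole union; copying sival_ptr, the widest member on a 64-bit target, carries sival_int along with it. Roughly, assuming the generic shape of the type (a sketch, not the kernel's exact definition):

	union sigval {
		int	sival_int;
		void	*sival_ptr;	/* 8 bytes on ia64: copying this
					 * also preserves sival_int */
	};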
@@ -7,6 +7,19 @@
  * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
  */

+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>		/* For unblank_screen() */
+
+#include <asm/hardirq.h>
+#include <asm/ia32.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/fpswa.h>
+
 /*
  * fp_emulate() needs to be able to access and update all floating point registers.  Those
  * saved in pt_regs can be accessed through that structure, but those not saved, will be
@@ -15,6 +28,8 @@
  * by declaring preserved registers that are not marked as "fixed" as global register
  * variables.
  */
+#include <asm/intrinsics.h>
+#ifdef ASM_SUPPORTED
 register double f2 asm ("f2"); register double f3 asm ("f3");
 register double f4 asm ("f4"); register double f5 asm ("f5");
@@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26"); register double f27 asm ("f27");
 register double f28 asm ("f28"); register double f29 asm ("f29");
 register double f30 asm ("f30"); register double f31 asm ("f31");
+#endif

-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>		/* For unblank_screen() */
-
-#include <asm/hardirq.h>
-#include <asm/ia32.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-#include <asm/fpswa.h>
-
 extern spinlock_t timerlist_lock;
...
@@ -22,6 +22,7 @@
 #include <asm/rse.h>
 #include <asm/processor.h>
 #include <asm/unaligned.h>
+#include <asm/intrinsics.h>

 extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
@@ -231,7 +232,7 @@ static u16 fr_info[32]={
 static void
 invala_gr (int regno)
 {
-#	define F(reg)	case reg: __asm__ __volatile__ ("invala.e r%0" :: "i"(reg)); break
+#	define F(reg)	case reg: ia64_invala_gr(reg); break

 	switch (regno) {
 		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
@@ -258,7 +259,7 @@ invala_gr (int regno)
 static void
 invala_fr (int regno)
 {
-#	define F(reg)	case reg: __asm__ __volatile__ ("invala.e f%0" :: "i"(reg)); break
+#	define F(reg)	case reg: ia64_invala_fr(reg); break

 	switch (regno) {
 		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
@@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 static inline void
 float_spill_f0 (struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("stf.spill [%0]=f0" :: "r"(final) : "memory");
+	ia64_stf_spill(final, 0);
 }

 static inline void
 float_spill_f1 (struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("stf.spill [%0]=f1" :: "r"(final) : "memory");
+	ia64_stf_spill(final, 1);
 }

 static void
@@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={
 static inline void
 mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldfe f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldfe(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf8 f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf8(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldfs f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldfs(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldfd f6=[%0];; stf.spill [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldfd(6, init);
+	ia64_stop();
+	ia64_stf_spill(final, 6);
 }

 static inline void
 float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stfe [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stfe(final, 6);
 }

 static inline void
 float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stf8 [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stf8(final, 6);
 }

 static inline void
 float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stfs [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stfs(final, 6);
 }

 static inline void
 float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
 {
-	__asm__ __volatile__ ("ldf.fill f6=[%0];; stfd [%1]=f6"
-			      :: "r"(init), "r"(final) : "f6","memory");
+	ia64_ldf_fill(6, init);
+	ia64_stop();
+	ia64_stfd(final, 6);
 }

 static int
...
@@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi
 		/*
 		 * Flush ALAT entries also.
 		 */
-		asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2)
-			      : "memory");
+		ia64_ptcga(start, (nbits<<2));
+		ia64_srlz_i();
 		start += (1UL << nbits);
 	} while (start < end);
 }
@@ -118,15 +118,13 @@ local_flush_tlb_all (void)
 	local_irq_save(flags);
 	for (i = 0; i < count0; ++i) {
 		for (j = 0; j < count1; ++j) {
-			asm volatile ("ptc.e %0" :: "r"(addr));
+			ia64_ptce(addr);
 			addr += stride1;
 		}
 		addr += stride0;
 	}
 	local_irq_restore(flags);
-	ia64_insn_group_barrier();
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
-	ia64_insn_group_barrier();
 }

 void
@@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	platform_global_tlb_purge(start, end, nbits);
 # else
 	do {
-		asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+		ia64_ptcl(start, (nbits<<2));
 		start += (1UL << nbits);
 	} while (start < end);
 # endif
-	ia64_insn_group_barrier();
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
-	ia64_insn_group_barrier();
 }

 void __init
...
...@@ -200,7 +200,7 @@ efi_unimplemented (void) ...@@ -200,7 +200,7 @@ efi_unimplemented (void)
#ifdef SGI_SN2 #ifdef SGI_SN2
#undef cpu_physical_id #undef cpu_physical_id
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff) #define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
void void
fprom_send_cpei(void) { fprom_send_cpei(void) {
...@@ -224,14 +224,14 @@ fprom_send_cpei(void) { ...@@ -224,14 +224,14 @@ fprom_send_cpei(void) {
#endif #endif
static long static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2, sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7) unsigned long in6, unsigned long in7)
{ {
register long r9 asm ("r9") = 0; long r9 = 0;
register long r10 asm ("r10") = 0; long r10 = 0;
register long r11 asm ("r11") = 0; long r11 = 0;
long status; long status;
/* /*
...@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2, ...@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
} }
asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11)); asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
return status; return ((struct sal_ret_values) {status, r9, r10, r11});
} }
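Returning struct sal_ret_values (added to <asm/sal.h> further down in this patch) relies on the ia64 software conventions: an aggregate of up to four longs comes back in r8-r11, exactly the registers a SAL caller reads, so the register-asm variables and the dummy asm that kept them live become unnecessary. A hypothetical caller, only to illustrate the register mapping:

	/* Illustrative; the argument values are made up: */
	struct sal_ret_values rv = sal_emulator(SAL_FREQ_BASE,
						0, 0, 0, 0, 0, 0, 0);
	long status = rv.r8;	/* status travels in r8    */
	long result = rv.r9;	/* first result, in r9     */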
......
...@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) { ...@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) {
irr_bit = irq_to_vector(irq) % 64; irr_bit = irq_to_vector(irq) % 64;
switch (irr_reg_num) { switch (irr_reg_num) {
case 0: case 0:
irr_reg = ia64_get_irr0(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
break; break;
case 1: case 1:
irr_reg = ia64_get_irr1(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
break; break;
case 2: case 2:
irr_reg = ia64_get_irr2(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
break; break;
case 3: case 3:
irr_reg = ia64_get_irr3(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
break; break;
} }
if (!test_bit(irr_bit, &irr_reg) ) { if (!test_bit(irr_bit, &irr_reg) ) {
...@@ -354,9 +354,9 @@ sn_get_next_bit(void) { ...@@ -354,9 +354,9 @@ sn_get_next_bit(void) {
void void
sn_set_tpr(int vector) { sn_set_tpr(int vector) {
if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) { if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
ia64_set_tpr(vector); ia64_setreg(_IA64_REG_CR_TPR, vector);
} else { } else {
ia64_set_tpr(IA64_LAST_DEVICE_VECTOR); ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR);
} }
} }
......
...@@ -395,7 +395,7 @@ sn_cpu_init(void) ...@@ -395,7 +395,7 @@ sn_cpu_init(void)
return; return;
cpuid = smp_processor_id(); cpuid = smp_processor_id();
cpuphyid = ((ia64_get_lid() >> 16) & 0xffff); cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff);
nasid = cpu_physical_id_to_nasid(cpuphyid); nasid = cpu_physical_id_to_nasid(cpuphyid);
cnode = nasid_to_cnodeid(nasid); cnode = nasid_to_cnodeid(nasid);
slice = cpu_physical_id_to_slice(cpuphyid); slice = cpu_physical_id_to_slice(cpuphyid);
......
...@@ -11,81 +11,73 @@ ...@@ -11,81 +11,73 @@
#include <asm/sn/sn2/io.h> #include <asm/sn/sn2/io.h>
#undef __sn_inb
#undef __sn_inw
#undef __sn_inl
#undef __sn_outb
#undef __sn_outw
#undef __sn_outl
#undef __sn_readb
#undef __sn_readw
#undef __sn_readl
#undef __sn_readq
unsigned int unsigned int
sn_inb (unsigned long port) __sn_inb (unsigned long port)
{ {
return __sn_inb(port); return ___sn_inb(port);
} }
unsigned int unsigned int
sn_inw (unsigned long port) __sn_inw (unsigned long port)
{ {
return __sn_inw(port); return ___sn_inw(port);
} }
unsigned int unsigned int
sn_inl (unsigned long port) __sn_inl (unsigned long port)
{ {
return __sn_inl(port); return ___sn_inl(port);
} }
void void
sn_outb (unsigned char val, unsigned long port) __sn_outb (unsigned char val, unsigned long port)
{ {
__sn_outb(val, port); ___sn_outb(val, port);
} }
void void
sn_outw (unsigned short val, unsigned long port) __sn_outw (unsigned short val, unsigned long port)
{ {
__sn_outw(val, port); ___sn_outw(val, port);
} }
void void
sn_outl (unsigned int val, unsigned long port) __sn_outl (unsigned int val, unsigned long port)
{ {
__sn_outl(val, port); ___sn_outl(val, port);
} }
unsigned char unsigned char
sn_readb (void *addr) __sn_readb (void *addr)
{ {
return __sn_readb (addr); return ___sn_readb (addr);
} }
unsigned short unsigned short
sn_readw (void *addr) __sn_readw (void *addr)
{ {
return __sn_readw (addr); return ___sn_readw (addr);
} }
unsigned int unsigned int
sn_readl (void *addr) __sn_readl (void *addr)
{ {
return __sn_readl (addr); return ___sn_readl (addr);
} }
unsigned long unsigned long
sn_readq (void *addr) __sn_readq (void *addr)
{ {
return __sn_readq (addr); return ___sn_readq (addr);
} }
/* define aliases: */
asm (".global __sn_inb, __sn_inw, __sn_inl");
asm ("__sn_inb = sn_inb");
asm ("__sn_inw = sn_inw");
asm ("__sn_inl = sn_inl");
asm (".global __sn_outb, __sn_outw, __sn_outl");
asm ("__sn_outb = sn_outb");
asm ("__sn_outw = sn_outw");
asm ("__sn_outl = sn_outl");
asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
asm ("__sn_readb = sn_readb");
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
...@@ -35,6 +35,7 @@ SECTIONS ...@@ -35,6 +35,7 @@ SECTIONS
{ {
*(.text.ivt) *(.text.ivt)
*(.text) *(.text)
*(.gnu.linkonce.t*)
} }
.text2 : AT(ADDR(.text2) - LOAD_OFFSET) .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) } { *(.text2) }
...@@ -183,7 +184,7 @@ SECTIONS ...@@ -183,7 +184,7 @@ SECTIONS
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
.data : AT(ADDR(.data) - LOAD_OFFSET) .data : AT(ADDR(.data) - LOAD_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } { *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16); . = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */ __gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
...@@ -194,7 +195,7 @@ SECTIONS ...@@ -194,7 +195,7 @@ SECTIONS
can access them all, and initialized data all before uninitialized, so can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */ we can shorten the on-disk segment size. */
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) } { *(.sdata) *(.sdata1) *(.srdata) }
_edata = .; _edata = .;
_bss = .; _bss = .;
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
......
...@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v) ...@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old + i; new = old + i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
...@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v) ...@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old + i; new = old + i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
...@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old - i; new = old - i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
...@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) ...@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old - i; new = old - i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
......
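The quoted "acq" becoming a bare acq in these loops is forced by the new intrinsics: the old macro built the mnemonic by string pasting ("cmpxchg4."sem), whereas ia64_cmpxchg now selects a macro by token pasting (ia64_cmpxchg4_##sem), and ## only joins preprocessor tokens. A two-line illustration (PICK is hypothetical):

#define PICK(sem)	ia64_cmpxchg4_##sem	/* token pasting */
/* PICK(acq)   -> ia64_cmpxchg4_acq				*/
/* PICK("acq") -> ia64_cmpxchg4_"acq"  -- not a valid name	*/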
...@@ -292,7 +292,7 @@ ffz (unsigned long x) ...@@ -292,7 +292,7 @@ ffz (unsigned long x)
{ {
unsigned long result; unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1))); result = ia64_popcnt((x & (~x - 1)));
return result; return result;
} }
...@@ -307,7 +307,7 @@ __ffs (unsigned long x) ...@@ -307,7 +307,7 @@ __ffs (unsigned long x)
{ {
unsigned long result; unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x)); result = ia64_popcnt((x-1) & ~x);
return result; return result;
} }
...@@ -323,7 +323,7 @@ ia64_fls (unsigned long x) ...@@ -323,7 +323,7 @@ ia64_fls (unsigned long x)
long double d = x; long double d = x;
long exp; long exp;
__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d)); exp = ia64_getf_exp(d);
return exp - 0xffff; return exp - 0xffff;
} }
...@@ -349,7 +349,7 @@ static __inline__ unsigned long ...@@ -349,7 +349,7 @@ static __inline__ unsigned long
hweight64 (unsigned long x) hweight64 (unsigned long x)
{ {
unsigned long result; unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x)); result = ia64_popcnt(x);
return result; return result;
} }
......
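All three routines use the same trick: build a mask whose set bits lie strictly below the bit being searched for, then let popcnt count them, which is the bit's index. A portable sanity check of the two identities, with gcc's builtin standing in for the popcnt instruction (illustrative only):

#include <stdio.h>

int main (void)
{
	unsigned long x = 0xb;	/* 1011: lowest clear bit is 2, lowest set bit is 0 */

	/* ffz: count the ones strictly below the lowest clear bit */
	printf("ffz   = %d\n", __builtin_popcountl(x & (~x - 1)));	/* 2 */

	/* __ffs: count the ones strictly below the lowest set bit */
	printf("__ffs = %d\n", __builtin_popcountl((x - 1) & ~x));	/* 0 */
	return 0;
}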
...@@ -7,13 +7,14 @@ ...@@ -7,13 +7,14 @@
*/ */
#include <asm/types.h> #include <asm/types.h>
#include <asm/intrinsics.h>
static __inline__ __const__ __u64 static __inline__ __const__ __u64
__ia64_swab64 (__u64 x) __ia64_swab64 (__u64 x)
{ {
__u64 result; __u64 result;
__asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x)); result = ia64_mux1(x, ia64_mux1_rev);
return result; return result;
} }
......
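mux1 with the @rev completer reverses the byte order of a 64-bit value, so the intrinsic form is a straight byte swap. A one-line check (illustrative; assumes <assert.h>):

/* Illustrative check of the @rev byte reversal: */
assert(__ia64_swab64(0x0102030405060708UL) == 0x0807060504030201UL);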
...@@ -6,8 +6,9 @@ ...@@ -6,8 +6,9 @@
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#include <asm/intrinsics.h>
/* In kernel mode, thread pointer (r13) is used to point to the /* In kernel mode, thread pointer (r13) is used to point to the
current task structure. */ current task structure. */
register struct task_struct *current asm ("r13"); #define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
#endif /* _ASM_IA64_CURRENT_H */ #endif /* _ASM_IA64_CURRENT_H */
...@@ -18,11 +18,13 @@ ...@@ -18,11 +18,13 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/intrinsics.h>
static __inline__ void static __inline__ void
ia64_set_itm (unsigned long val) ia64_set_itm (unsigned long val)
{ {
__asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_CR_ITM, val);
ia64_srlz_d();
} }
static __inline__ unsigned long static __inline__ unsigned long
...@@ -30,20 +32,23 @@ ia64_get_itm (void) ...@@ -30,20 +32,23 @@ ia64_get_itm (void)
{ {
unsigned long result; unsigned long result;
__asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory"); result = ia64_getreg(_IA64_REG_CR_ITM);
ia64_srlz_d();
return result; return result;
} }
static __inline__ void static __inline__ void
ia64_set_itv (unsigned long val) ia64_set_itv (unsigned long val)
{ {
__asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_CR_ITV, val);
ia64_srlz_d();
} }
static __inline__ void static __inline__ void
ia64_set_itc (unsigned long val) ia64_set_itc (unsigned long val)
{ {
__asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_AR_ITC, val);
ia64_srlz_d();
} }
static __inline__ unsigned long static __inline__ unsigned long
...@@ -51,10 +56,13 @@ ia64_get_itc (void) ...@@ -51,10 +56,13 @@ ia64_get_itc (void)
{ {
unsigned long result; unsigned long result;
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
#ifdef CONFIG_ITANIUM #ifdef CONFIG_ITANIUM
while (unlikely((__s32) result == -1)) while (unlikely((__s32) result == -1)) {
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
}
#endif #endif
return result; return result;
} }
...@@ -62,15 +70,11 @@ ia64_get_itc (void) ...@@ -62,15 +70,11 @@ ia64_get_itc (void)
static __inline__ void static __inline__ void
__delay (unsigned long loops) __delay (unsigned long loops)
{ {
unsigned long saved_ar_lc;
if (loops < 1) if (loops < 1)
return; return;
__asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc)); for (;loops--;)
__asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1)); ia64_nop(0);
__asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
__asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
} }
static __inline__ void static __inline__ void
......
/*
* Copyright (C) 2002,2003 Intel Corp.
* Jun Nakajima <jun.nakajima@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
*/
#ifndef _ASM_IA64_IA64REGS_H
#define _ASM_IA64_IA64REGS_H
/*
** Register Names for getreg() and setreg()
*/
/* Special Registers */
#define _IA64_REG_IP 1016 /* getreg only */
#define _IA64_REG_PSR 1019
#define _IA64_REG_PSR_L 1019
/* General Integer Registers */
#define _IA64_REG_GP 1025 /* R1 */
#define _IA64_REG_R8 1032 /* R8 */
#define _IA64_REG_R9 1033 /* R9 */
#define _IA64_REG_SP 1036 /* R12 */
#define _IA64_REG_TP 1037 /* R13 */
/* Application Registers */
#define _IA64_REG_AR_KR0 3072
#define _IA64_REG_AR_KR1 3073
#define _IA64_REG_AR_KR2 3074
#define _IA64_REG_AR_KR3 3075
#define _IA64_REG_AR_KR4 3076
#define _IA64_REG_AR_KR5 3077
#define _IA64_REG_AR_KR6 3078
#define _IA64_REG_AR_KR7 3079
#define _IA64_REG_AR_RSC 3088
#define _IA64_REG_AR_BSP 3089
#define _IA64_REG_AR_BSPSTORE 3090
#define _IA64_REG_AR_RNAT 3091
#define _IA64_REG_AR_FCR 3093
#define _IA64_REG_AR_EFLAG 3096
#define _IA64_REG_AR_CSD 3097
#define _IA64_REG_AR_SSD 3098
#define _IA64_REG_AR_CFLAG 3099
#define _IA64_REG_AR_FSR 3100
#define _IA64_REG_AR_FIR 3101
#define _IA64_REG_AR_FDR 3102
#define _IA64_REG_AR_CCV 3104
#define _IA64_REG_AR_UNAT 3108
#define _IA64_REG_AR_FPSR 3112
#define _IA64_REG_AR_ITC 3116
#define _IA64_REG_AR_PFS 3136
#define _IA64_REG_AR_LC 3137
#define _IA64_REG_AR_EC 3138
/* Control Registers */
#define _IA64_REG_CR_DCR 4096
#define _IA64_REG_CR_ITM 4097
#define _IA64_REG_CR_IVA 4098
#define _IA64_REG_CR_PTA 4104
#define _IA64_REG_CR_IPSR 4112
#define _IA64_REG_CR_ISR 4113
#define _IA64_REG_CR_IIP 4115
#define _IA64_REG_CR_IFA 4116
#define _IA64_REG_CR_ITIR 4117
#define _IA64_REG_CR_IIPA 4118
#define _IA64_REG_CR_IFS 4119
#define _IA64_REG_CR_IIM 4120
#define _IA64_REG_CR_IHA 4121
#define _IA64_REG_CR_LID 4160
#define _IA64_REG_CR_IVR 4161 /* getreg only */
#define _IA64_REG_CR_TPR 4162
#define _IA64_REG_CR_EOI 4163
#define _IA64_REG_CR_IRR0 4164 /* getreg only */
#define _IA64_REG_CR_IRR1 4165 /* getreg only */
#define _IA64_REG_CR_IRR2 4166 /* getreg only */
#define _IA64_REG_CR_IRR3 4167 /* getreg only */
#define _IA64_REG_CR_ITV 4168
#define _IA64_REG_CR_PMV 4169
#define _IA64_REG_CR_CMCV 4170
#define _IA64_REG_CR_LRR0 4176
#define _IA64_REG_CR_LRR1 4177
/* Indirect Registers for getindreg() and setindreg() */
#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
#define _IA64_REG_INDR_DBR 9001
#define _IA64_REG_INDR_IBR 9002
#define _IA64_REG_INDR_PKR 9003
#define _IA64_REG_INDR_PMC 9004
#define _IA64_REG_INDR_PMD 9005
#define _IA64_REG_INDR_RR 9006
#endif /* _ASM_IA64_IA64REGS_H */
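These encodings are what ia64_getreg()/ia64_setreg() switch on to emit the right mov, and the /* getreg only */ tags mark read-only registers (ip, cr.ivr, the cr.irr banks). A small usage sketch built from calls that appear elsewhere in this patch (the helper names are hypothetical):

/* Hypothetical helper, mirroring the cpu_physical_id() users in this patch: */
static inline unsigned int
example_cpu_phys_id (void)
{
	unsigned long lid = ia64_getreg(_IA64_REG_CR_LID);	/* mov =cr.lid */
	return (lid >> 16) & 0xffff;				/* id/eid bits */
}

/* And a write, as in sn_set_tpr(): */
static inline void
example_set_tpr (int vector)
{
	ia64_setreg(_IA64_REG_CR_TPR, vector);			/* mov cr.tpr= */
}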
...@@ -8,8 +8,17 @@ ...@@ -8,8 +8,17 @@
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#ifndef __ASSEMBLY__
#include <linux/config.h> #include <linux/config.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
#include <asm/intel_intrin.h>
#else
#include <asm/gcc_intrin.h>
#endif
/* /*
* Force an unresolved reference if someone tries to use * Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value. * ia64_fetch_and_add() with a bad value.
...@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \ ({ \
switch (sz) { \ switch (sz) { \
case 4: \ case 4: \
__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \ tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \ break; \
\ \
case 8: \ case 8: \
__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \ tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \ break; \
\ \
default: \ default: \
...@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
(__typeof__(*(v))) (_tmp); /* return old value */ \ (__typeof__(*(v))) (_tmp); /* return old value */ \
}) })
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */ #define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
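The fetchadd intrinsics keep the old "i" immediate constraint in spirit: the hardware fetchadd only accepts the increments -16, -8, -4, -1, 1, 4, 8 and 16, so n must stay a compile-time constant from that short list. A plausible sketch of one instance, assuming the gcc definitions wrap the same asm the macro used to open-code:

/* Assumption -- not the actual <asm/gcc_intrin.h> text: */
#define ia64_fetchadd4_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res)				\
		      : "r"(p), "i"(inc)				\
		      : "memory");					\
	ia64_intri_res;							\
})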
/* /*
* This function doesn't exist, so you'll get a linker error if * This function doesn't exist, so you'll get a linker error if
* something tries to do an invalid xchg(). * something tries to do an invalid xchg().
*/ */
extern void __xchg_called_with_bad_pointer (void); extern void ia64_xchg_called_with_bad_pointer (void);
static __inline__ unsigned long #define __xchg(x,ptr,size) \
__xchg (unsigned long x, volatile void *ptr, int size) ({ \
{ unsigned long __xchg_result; \
unsigned long result; \
switch (size) { \
switch (size) { case 1: \
case 1: __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result) break; \
: "r" (ptr), "r" (x) : "memory"); \
return result; case 2: \
__xchg_result = ia64_xchg2((__u16 *)ptr, x); \
case 2: break; \
__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result) \
: "r" (ptr), "r" (x) : "memory"); case 4: \
return result; __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
break; \
case 4: \
__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result) case 8: \
: "r" (ptr), "r" (x) : "memory"); __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
return result; break; \
default: \
case 8: ia64_xchg_called_with_bad_pointer(); \
__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result) } \
: "r" (ptr), "r" (x) : "memory"); __xchg_result; \
return result; })
}
__xchg_called_with_bad_pointer();
return x;
}
#define xchg(ptr,x) \ #define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr)))) ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
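Moving __xchg from an inline function to a statement-expression macro lets each size case call a differently typed intrinsic directly. A sketch of one of them, assuming it wraps the asm the old function body contained:

/* Assumption -- illustrative shape only: */
#define ia64_xchg4(ptr, x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2"				\
		      : "=r"(ia64_intri_res)				\
		      : "r"(ptr), "r"(x)				\
		      : "memory");					\
	ia64_intri_res;							\
})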
...@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size) ...@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size)
* This function doesn't exist, so you'll get a linker error * This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). * if something tries to do an invalid cmpxchg().
*/ */
extern long __cmpxchg_called_with_bad_pointer(void); extern long ia64_cmpxchg_called_with_bad_pointer(void);
#define ia64_cmpxchg(sem,ptr,old,new,size) \ #define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \ ({ \
__typeof__(ptr) _p_ = (ptr); \
__typeof__(new) _n_ = (new); \
__u64 _o_, _r_; \ __u64 _o_, _r_; \
\ \
switch (size) { \ switch (size) { \
...@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void); ...@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void);
case 8: _o_ = (__u64) (long) (old); break; \ case 8: _o_ = (__u64) (long) (old); break; \
default: break; \ default: break; \
} \ } \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
case 2: \ case 2: \
__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
case 4: \ case 4: \
__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
case 8: \ case 8: \
__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
default: \ default: \
_r_ = __cmpxchg_called_with_bad_pointer(); \ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \ break; \
} \ } \
(__typeof__(old)) _r_; \ (__typeof__(old)) _r_; \
}) })
#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr))) #define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr))) #define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */ /* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n) #define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
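Each ia64_cmpxchgN_sem must park the expected value in ar.ccv before issuing the cmpxchg, which is why the explicit "mov ar.ccv=" asm disappears from the macro above: it moved into the intrinsic. A plausible shape for one instance (assumption; the real definition lives in the compiler-specific header):

#define ia64_cmpxchg4_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv"			\
		      : "=r"(ia64_intri_res)				\
		      : "r"(ptr), "r"(new)				\
		      : "memory");					\
	ia64_intri_res;							\
})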
...@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void); ...@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
if (_cmpxchg_bugcheck_count-- <= 0) { \ if (_cmpxchg_bugcheck_count-- <= 0) { \
void *ip; \ void *ip; \
extern int printk(const char *fmt, ...); \ extern int printk(const char *fmt, ...); \
asm ("mov %0=ip" : "=r"(ip)); \ ip = ia64_getreg(_IA64_REG_IP); \
printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
break; \ break; \
} \ } \
...@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void); ...@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void);
# define CMPXCHG_BUGCHECK(v) # define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
#endif
#endif /* _ASM_IA64_INTRINSICS_H */ #endif /* _ASM_IA64_INTRINSICS_H */
...@@ -55,6 +55,7 @@ extern unsigned int num_io_spaces; ...@@ -55,6 +55,7 @@ extern unsigned int num_io_spaces;
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/intrinsics.h>
/* /*
* Change virtual addresses to physical addresses and vv. * Change virtual addresses to physical addresses and vv.
...@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address) ...@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address)
* Memory fence w/accept. This should never be used in code that is * Memory fence w/accept. This should never be used in code that is
* not IA-64 specific. * not IA-64 specific.
*/ */
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") #define __ia64_mf_a() ia64_mfa()
static inline const unsigned long static inline const unsigned long
__ia64_get_io_port_base (void) __ia64_get_io_port_base (void)
......
...@@ -155,7 +155,7 @@ struct ia64_machine_vector { ...@@ -155,7 +155,7 @@ struct ia64_machine_vector {
ia64_mv_readw_t *readw; ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl; ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq; ia64_mv_readq_t *readq;
}; } __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \ #define MACHVEC_INIT(name) \
{ \ { \
......
...@@ -158,9 +158,7 @@ reload_context (mm_context_t context) ...@@ -158,9 +158,7 @@ reload_context (mm_context_t context)
ia64_set_rr(0x4000000000000000, rr2); ia64_set_rr(0x4000000000000000, rr2);
ia64_set_rr(0x6000000000000000, rr3); ia64_set_rr(0x6000000000000000, rr3);
ia64_set_rr(0x8000000000000000, rr4); ia64_set_rr(0x8000000000000000, rr4);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */ ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
} }
static inline void static inline void
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/intrinsics.h>
/* /*
* PAGE_SHIFT determines the actual kernel page size. * PAGE_SHIFT determines the actual kernel page size.
...@@ -143,7 +144,7 @@ get_order (unsigned long size) ...@@ -143,7 +144,7 @@ get_order (unsigned long size)
double d = size - 1; double d = size - 1;
long order; long order;
__asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d)); order = ia64_getf_exp(d);
order = order - PAGE_SHIFT - 0xffff + 1; order = order - PAGE_SHIFT - 0xffff + 1;
if (order < 0) if (order < 0)
order = 0; order = 0;
......
...@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector ...@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector
/* Initialize the processor controlled caches */ /* Initialize the processor controlled caches */
static inline s64 static inline s64
ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict) ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
{ {
struct ia64_pal_retval iprv; struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict); PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
return iprv.status; return iprv.status;
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <asm/intrinsics.h>
/* /*
* the semaphore definition * the semaphore definition
...@@ -82,8 +83,9 @@ static inline void ...@@ -82,8 +83,9 @@ static inline void
__down_read (struct rw_semaphore *sem) __down_read (struct rw_semaphore *sem)
{ {
int result; int result;
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
"=r"(result) : "r"(&sem->count) : "memory"); result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
if (result < 0) if (result < 0)
rwsem_down_read_failed(sem); rwsem_down_read_failed(sem);
} }
...@@ -112,8 +114,9 @@ static inline void ...@@ -112,8 +114,9 @@ static inline void
__up_read (struct rw_semaphore *sem) __up_read (struct rw_semaphore *sem)
{ {
int result; int result;
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
"=r"(result) : "r"(&sem->count) : "memory"); result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0) if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem); rwsem_wake(sem);
} }
......
...@@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size, ...@@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
extern unsigned long sal_platform_features; extern unsigned long sal_platform_features;
struct sal_ret_values {
long r8; long r9; long r10; long r11;
};
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_PAL_H */ #endif /* _ASM_IA64_PAL_H */
...@@ -120,7 +120,7 @@ hard_smp_processor_id (void) ...@@ -120,7 +120,7 @@ hard_smp_processor_id (void)
unsigned long bits; unsigned long bits;
} lid; } lid;
lid.bits = ia64_get_lid(); lid.bits = ia64_getreg(_IA64_REG_CR_LID);
return lid.f.id << 8 | lid.f.eid; return lid.f.id << 8 | lid.f.eid;
} }
......
...@@ -11,11 +11,23 @@ ...@@ -11,11 +11,23 @@
extern void * sn_io_addr(unsigned long port); /* Forward definition */ extern void * sn_io_addr(unsigned long port); /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */ extern void sn_mmiob(void); /* Forward definition */
#include <asm/intrinsics.h>
#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") #define __sn_mf_a() ia64_mfa()
extern void sn_dma_flush(unsigned long); extern void sn_dma_flush(unsigned long);
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
/* /*
* The following routines are SN Platform specific, called when * The following routines are SN Platform specific, called when
* a reference is made to inX/outX set macros. SN Platform * a reference is made to inX/outX set macros. SN Platform
...@@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long); ...@@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long);
*/ */
static inline unsigned int static inline unsigned int
__sn_inb (unsigned long port) ___sn_inb (unsigned long port)
{ {
volatile unsigned char *addr; volatile unsigned char *addr;
unsigned char ret = -1; unsigned char ret = -1;
...@@ -40,7 +52,7 @@ __sn_inb (unsigned long port) ...@@ -40,7 +52,7 @@ __sn_inb (unsigned long port)
} }
static inline unsigned int static inline unsigned int
__sn_inw (unsigned long port) ___sn_inw (unsigned long port)
{ {
volatile unsigned short *addr; volatile unsigned short *addr;
unsigned short ret = -1; unsigned short ret = -1;
...@@ -54,7 +66,7 @@ __sn_inw (unsigned long port) ...@@ -54,7 +66,7 @@ __sn_inw (unsigned long port)
} }
static inline unsigned int static inline unsigned int
__sn_inl (unsigned long port) ___sn_inl (unsigned long port)
{ {
volatile unsigned int *addr; volatile unsigned int *addr;
unsigned int ret = -1; unsigned int ret = -1;
...@@ -68,7 +80,7 @@ __sn_inl (unsigned long port) ...@@ -68,7 +80,7 @@ __sn_inl (unsigned long port)
} }
static inline void static inline void
__sn_outb (unsigned char val, unsigned long port) ___sn_outb (unsigned char val, unsigned long port)
{ {
volatile unsigned char *addr; volatile unsigned char *addr;
...@@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port) ...@@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port)
} }
static inline void static inline void
__sn_outw (unsigned short val, unsigned long port) ___sn_outw (unsigned short val, unsigned long port)
{ {
volatile unsigned short *addr; volatile unsigned short *addr;
...@@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port) ...@@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port)
} }
static inline void static inline void
__sn_outl (unsigned int val, unsigned long port) ___sn_outl (unsigned int val, unsigned long port)
{ {
volatile unsigned int *addr; volatile unsigned int *addr;
...@@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port) ...@@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port)
*/ */
static inline unsigned char static inline unsigned char
__sn_readb (void *addr) ___sn_readb (void *addr)
{ {
unsigned char val; unsigned char val;
...@@ -121,7 +133,7 @@ __sn_readb (void *addr) ...@@ -121,7 +133,7 @@ __sn_readb (void *addr)
} }
static inline unsigned short static inline unsigned short
__sn_readw (void *addr) ___sn_readw (void *addr)
{ {
unsigned short val; unsigned short val;
...@@ -132,7 +144,7 @@ __sn_readw (void *addr) ...@@ -132,7 +144,7 @@ __sn_readw (void *addr)
} }
static inline unsigned int static inline unsigned int
__sn_readl (void *addr) ___sn_readl (void *addr)
{ {
unsigned int val; unsigned int val;
...@@ -143,7 +155,7 @@ __sn_readl (void *addr) ...@@ -143,7 +155,7 @@ __sn_readl (void *addr)
} }
static inline unsigned long static inline unsigned long
__sn_readq (void *addr) ___sn_readq (void *addr)
{ {
unsigned long val; unsigned long val;
......
...@@ -89,7 +89,7 @@ ...@@ -89,7 +89,7 @@
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
#define cpu_logical_id(cpu) 0 #define cpu_logical_id(cpu) 0
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff) #define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#endif #endif
/* /*
...@@ -98,8 +98,8 @@ ...@@ -98,8 +98,8 @@
*/ */
#define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff) #define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff)
#define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3) #define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3)
#define get_nasid() ((ia64_get_lid() >> 16) & 0xfff) #define get_nasid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff)
#define get_slice() ((ia64_get_lid() >> 28) & 0xf) #define get_slice() ((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf)
#define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff) #define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff)
/* /*
......
...@@ -10,10 +10,12 @@ ...@@ -10,10 +10,12 @@
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/compiler.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/intrinsics.h>
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
...@@ -102,8 +104,8 @@ typedef struct { ...@@ -102,8 +104,8 @@ typedef struct {
do { \ do { \
rwlock_t *__read_lock_ptr = (rw); \ rwlock_t *__read_lock_ptr = (rw); \
\ \
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
while (*(volatile int *)__read_lock_ptr < 0) \ while (*(volatile int *)__read_lock_ptr < 0) \
cpu_relax(); \ cpu_relax(); \
} \ } \
...@@ -112,7 +114,7 @@ do { \ ...@@ -112,7 +114,7 @@ do { \
#define _raw_read_unlock(rw) \ #define _raw_read_unlock(rw) \
do { \ do { \
rwlock_t *__read_lock_ptr = (rw); \ rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0) } while (0)
#define _raw_write_lock(rw) \ #define _raw_write_lock(rw) \
......
...@@ -55,12 +55,6 @@ extern struct ia64_boot_param { ...@@ -55,12 +55,6 @@ extern struct ia64_boot_param {
__u64 initrd_size; __u64 initrd_size;
} *ia64_boot_param; } *ia64_boot_param;
static inline void
ia64_insn_group_barrier (void)
{
__asm__ __volatile__ (";;" ::: "memory");
}
/* /*
* Macros to force memory ordering. In these descriptions, "previous" * Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all * and "subsequent" refer to program order; "visible" means that all
...@@ -83,7 +77,7 @@ ia64_insn_group_barrier (void) ...@@ -83,7 +77,7 @@ ia64_insn_group_barrier (void)
* it's (presumably) much slower than mf and (b) mf.a is supported for * it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only. * sequential memory pages only.
*/ */
#define mb() __asm__ __volatile__ ("mf" ::: "memory") #define mb() ia64_mf()
#define rmb() mb() #define rmb() mb()
#define wmb() mb() #define wmb() mb()
#define read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0)
...@@ -119,22 +113,28 @@ ia64_insn_group_barrier (void) ...@@ -119,22 +113,28 @@ ia64_insn_group_barrier (void)
/* clearing psr.i is implicitly serialized (visible by next insn) */ /* clearing psr.i is implicitly serialized (visible by next insn) */
/* setting psr.i requires data serialization */ /* setting psr.i requires data serialization */
#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \ #define __local_irq_save(x) \
"rsm psr.i;;" \ do { \
: "=r" (x) :: "memory") unsigned long psr; \
#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory") psr = ia64_getreg(_IA64_REG_PSR); \
#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \ ia64_stop(); \
"(p6) ssm psr.i;" \ ia64_rsm(IA64_PSR_I); \
"(p7) rsm psr.i;;" \ (x) = psr; \
"(p6) srlz.d" \ } while (0)
:: "r" ((x) & IA64_PSR_I) \
: "p6", "p7", "memory") #define __local_irq_disable() \
do { \
ia64_stop(); \
ia64_rsm(IA64_PSR_I); \
} while (0)
#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
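ia64_intrin_local_irq_restore() has to reproduce the predicated sequence the old __local_irq_restore() open-coded: re-enable interrupts (and srlz.d) when PSR.I was set in the saved mask, keep them masked otherwise. A sketch that simply mirrors the removed asm (an assumption about the gcc_intrin.h definition):

#define ia64_intrin_local_irq_restore(x)				\
do {									\
	asm volatile (";; cmp.ne p6,p7=%0,r0;;"			\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"				\
		      "(p6) srlz.d"					\
		      :: "r"(x) : "p6", "p7", "memory");		\
} while (0)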
#ifdef CONFIG_IA64_DEBUG_IRQ #ifdef CONFIG_IA64_DEBUG_IRQ
extern unsigned long last_cli_ip; extern unsigned long last_cli_ip;
# define __save_ip() __asm__ ("mov %0=ip" : "=r" (last_cli_ip)) # define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
# define local_irq_save(x) \ # define local_irq_save(x) \
do { \ do { \
...@@ -164,8 +164,8 @@ do { \ ...@@ -164,8 +164,8 @@ do { \
# define local_irq_restore(x) __local_irq_restore(x) # define local_irq_restore(x) __local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */ #endif /* !CONFIG_IA64_DEBUG_IRQ */
#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory") #define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory") #define local_save_flags(flags) ({ (flags) = ia64_getreg(_IA64_REG_PSR); })
#define irqs_disabled() \ #define irqs_disabled() \
({ \ ({ \
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/intrinsics.h>
typedef unsigned long cycles_t; typedef unsigned long cycles_t;
...@@ -32,7 +33,7 @@ get_cycles (void) ...@@ -32,7 +33,7 @@ get_cycles (void)
{ {
cycles_t ret; cycles_t ret;
__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret)); ret = ia64_getreg(_IA64_REG_AR_ITC);
return ret; return ret;
} }
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/intrinsics.h>
/* /*
* Now for some TLB flushing routines. This is the kind of stuff that * Now for some TLB flushing routines. This is the kind of stuff that
...@@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) ...@@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else #else
if (vma->vm_mm == current->active_mm) if (vma->vm_mm == current->active_mm)
asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory"); ia64_ptcl(addr, (PAGE_SHIFT << 2));
else else
vma->vm_mm->context = 0; vma->vm_mm->context = 0;
#endif #endif
......
...@@ -334,73 +334,18 @@ waitpid (int pid, int * wait_stat, int flags) ...@@ -334,73 +334,18 @@ waitpid (int pid, int * wait_stat, int flags)
} }
static inline int extern int execve (const char *filename, char *const av[], char *const ep[]);
execve (const char *filename, char *const av[], char *const ep[]) extern pid_t clone (unsigned long flags, void *sp);
{
register long r8 asm("r8");
register long r10 asm("r10");
register long r15 asm("r15") = __NR_execve;
register long out0 asm("out0") = (long)filename;
register long out1 asm("out1") = (long)av;
register long out2 asm("out2") = (long)ep;
asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
: "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1), "=r" (out2)
: "2" (r15), "3" (out0), "4" (out1), "5" (out2)
: "memory", "out3", "out4", "out5", "out6", "out7",
/* Non-stacked integer registers, minus r8, r10, r15, r13 */
"r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
"r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
"r28", "r29", "r30", "r31",
/* Predicate registers. */
"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
/* Non-rotating fp registers. */
"f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
/* Branch registers. */
"b6", "b7" );
return r8;
}
static inline pid_t
clone (unsigned long flags, void *sp)
{
register long r8 asm("r8");
register long r10 asm("r10");
register long r15 asm("r15") = __NR_clone;
register long out0 asm("out0") = (long)flags;
register long out1 asm("out1") = (long)sp;
long retval;
/* clone clobbers current, hence the "r13" in the clobbers list */
asm volatile ( "break " __stringify(__BREAK_SYSCALL) ";;\n\t"
: "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1)
: "2" (r15), "3" (out0), "4" (out1)
: "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13",
/* Non-stacked integer registers, minus r8, r10, r15, r13 */
"r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
"r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
"r28", "r29", "r30", "r31",
/* Predicate registers. */
"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
/* Non-rotating fp registers. */
"f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
/* Branch registers. */
"b6", "b7" );
retval = r8;
return retval;;
}
#endif /* __KERNEL_SYSCALLS__ */ #endif /* __KERNEL_SYSCALLS__ */
/* /*
* "Conditional" syscalls * "Conditional" syscalls
* *
* What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on * Note, this macro can only be used in the
* all toolchains, so we just do it by hand. Note, this macro can only be used in the
* file which defines sys_ni_syscall, i.e., in kernel/sys.c. * file which defines sys_ni_syscall, i.e., in kernel/sys.c.
*/ */
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); #define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall")));
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......