Commit 11d03417 authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents be974925 8417a5d6
......@@ -66,7 +66,7 @@ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
boot := arch/ia64/boot
boot := arch/ia64/hp/sim/boot
.PHONY: boot compressed check
......
......@@ -7,7 +7,7 @@
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
obj-y := hpsim_irq.o hpsim_setup.o
obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_HP_SIMETH) += simeth.o
......
......@@ -5,7 +5,7 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1998 by David Mosberger-Tang <davidm@hpl.hp.com>
# Copyright (C) 1998, 2003 by David Mosberger-Tang <davidm@hpl.hp.com>
#
targets-$(CONFIG_IA64_HP_SIM) += bootloader
......@@ -32,6 +32,6 @@ $(obj)/vmlinux.bin: vmlinux FORCE
LDFLAGS_bootloader = -static -T
$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o \
$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
lib/lib.a arch/ia64/lib/lib.a FORCE
$(call if_changed,ld)
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
.bss
.align 16
stack_mem:
.skip 16384
.text
/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
GLOBAL_ENTRY(printk)
break 0
END(printk)
GLOBAL_ENTRY(_start)
.prologue
.save rp, r0
.body
movl gp = __gp
movl sp = stack_mem
bsw.1
br.call.sptk.many rp=start_bootloader
END(_start)
GLOBAL_ENTRY(ssc)
.regstk 5,0,0,0
mov r15=in4 /* r15 carries the SSC opcode (5th argument) */
break 0x80001 /* trap into the simulator's syscall handler */
br.ret.sptk.many b0
END(ssc)
GLOBAL_ENTRY(jmp_to_kernel)
.regstk 2,0,0,0
mov r28=in0 /* boot-parameter pointer, where the kernel entry code expects it */
mov b7=in1 /* kernel entry point */
br.sptk.few b7
END(jmp_to_kernel)
GLOBAL_ENTRY(pal_emulator_static)
mov r8=-1
mov r9=256
;;
cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
(p6) br.cond.sptk.few static
;;
mov r9=512
;;
cmp.gtu p6,p7=r9,r28
(p6) br.cond.sptk.few stacked
;;
static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
(p7) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
movl r9=0x100000000 /* tc.base */
movl r10=0x0000000200000003 /* count[0], count[1] */
movl r11=0x1000000000002000 /* stride[0], stride[1] */
br.cond.sptk.few rp
1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x100000064 /* proc_ratio (1/100) */
movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
movl r11=0x100000064 /* itc_ratio<<32 (1/100) */
;;
1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
mov r9=96 /* num phys stacked */
mov r10=0 /* hints */
mov r11=0
br.cond.sptk.few rp
1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */
(p7) br.cond.sptk.few 1f
mov r9=ar.lc
movl r8=524288 /* flush 512k cache lines (16MB) */
;;
mov ar.lc=r8
movl r8=0xe000000000000000
;;
.loop: fc r8
add r8=32,r8
br.cloop.sptk.few .loop
sync.i
;;
srlz.i
;;
mov ar.lc=r9
mov r8=r0
;;
1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */
mov r10=0 /* reserved */
mov r11=0 /* reserved */
mov r16=0xffff /* implemented PMC */
mov r17=0xffff /* implemented PMD */
add r18=8,r29 /* second index */
;;
st8 [r29]=r16,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store implemented PMD */
st8 [r18]=r0,16 /* clear remaining bits */
mov r16=0xf0 /* cycles count capable PMC */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
mov r17=0x10 /* retired bundles capable PMC */
;;
st8 [r29]=r16,16 /* store cycles capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store retired bundle capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
1: br.cond.sptk.few rp
stacked:
br.ret.sptk.few rp
END(pal_emulator_static)
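For orientation (an editorial aside, not part of the patch): the emulator above follows the static PAL calling convention, taking the function index in r28 and returning status/values in r8-r11. On the C side this corresponds to the PAL return type from <asm/pal.h>, approximately:

/* Assumed shape, mirroring the r8-r11 convention used above. */
struct ia64_pal_retval {
	s64 status;	/* r8: 0 on success, negative PAL error code otherwise */
	u64 v0, v1, v2;	/* r9, r10, r11: call-specific return values */
};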
/*
* arch/ia64/boot/bootloader.c
* arch/ia64/hp/sim/boot/bootloader.c
*
* Loads an ELF kernel.
*
* Copyright (C) 1998-2002 Hewlett-Packard Co
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
......@@ -17,31 +17,13 @@ struct task_struct; /* forward declaration for elf.h */
#include <linux/kernel.h>
#include <asm/elf.h>
#include <asm/intrinsics.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
#include <asm/system.h>
/* Simulator system calls: */
#define SSC_CONSOLE_INIT 20
#define SSC_GETCHAR 21
#define SSC_PUTCHAR 31
#define SSC_OPEN 50
#define SSC_CLOSE 51
#define SSC_READ 52
#define SSC_WRITE 53
#define SSC_GET_COMPLETION 54
#define SSC_WAIT_COMPLETION 55
#define SSC_CONNECT_INTERRUPT 58
#define SSC_GENERATE_INTERRUPT 59
#define SSC_SET_PERIODIC_INTERRUPT 60
#define SSC_GET_RTC 65
#define SSC_EXIT 66
#define SSC_LOAD_SYMBOLS 69
#define SSC_GET_TOD 74
#define SSC_GET_ARGS 75
#include "ssc.h"
struct disk_req {
unsigned long addr;
......@@ -53,10 +35,8 @@ struct disk_stat {
unsigned count;
};
#include "../kernel/fw-emu.c"
/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
asm (".global printk; printk = 0");
extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen);
/*
* Set a break point on this function so that symbols are available to set breakpoints in
......@@ -82,9 +62,8 @@ cons_write (const char *buf)
#define MAX_ARGS 32
void
_start (void)
start_bootloader (void)
{
static char stack[16384] __attribute__ ((aligned (16)));
static char mem[4096];
static char buffer[1024];
unsigned long off;
......@@ -98,10 +77,6 @@ _start (void)
char *kpath, *args;
long arglen = 0;
asm volatile ("movl gp=__gp;;" ::: "memory");
asm volatile ("mov sp=%0" :: "r"(stack) : "memory");
asm volatile ("bsw.1;;");
ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
/*
......@@ -195,15 +170,14 @@ _start (void)
cons_write("starting kernel...\n");
/* fake an I/O base address: */
asm volatile ("mov ar.k0=%0" :: "r"(0xffffc000000UL));
ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL);
bp = sys_fw_init(args, arglen);
ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS);
debug_break();
asm volatile ("mov sp=%2; mov r28=%1; br.sptk.few %0"
:: "b"(e_entry), "r"(bp), "r"(__pa(&stack)));
jmp_to_kernel((unsigned long) bp, e_entry);
cons_write("kernel returned!\n");
ssc(-1, 0, 0, 0, SSC_EXIT);
......
......@@ -3,9 +3,6 @@
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* For the HP simulator, this file gets include in boot/bootloader.c.
* For SoftSDV, this file gets included in sys_softsdv.c.
*/
#include <linux/config.h>
......@@ -18,6 +15,8 @@
#include <asm/pal.h>
#include <asm/sal.h>
#include "ssc.h"
#define MB (1024*1024UL)
#define SIMPLE_MEMMAP 1
......@@ -37,27 +36,6 @@ static char fw_mem[( sizeof(struct ia64_boot_param)
+ NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
+ 1024)] __attribute__ ((aligned (8)));
#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
/* Simulator system calls: */
#define SSC_EXIT 66
/*
* Simulator system call.
*/
static long
ssc (long arg0, long arg1, long arg2, long arg3, int nr)
{
register long r8 asm ("r8");
asm volatile ("mov r15=%1\n\t"
"break 0x80001"
: "=r"(r8)
: "r"(nr), "r"(arg0), "r"(arg1), "r"(arg2), "r"(arg3));
return r8;
}
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
......@@ -119,109 +97,8 @@ offtime (unsigned long t, efi_time_t *tp)
return 1;
}
#endif /* CONFIG_IA64_HP_SIM */
/*
* Very ugly, but we need this in the simulator only. Once we run on
* real hw, this can all go away.
*/
extern void pal_emulator_static (void);
asm (
" .proc pal_emulator_static\n"
"pal_emulator_static:"
" mov r8=-1\n"
" mov r9=256\n"
" ;;\n"
" cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */\n"
"(p6) br.cond.sptk.few static\n"
" ;;\n"
" mov r9=512\n"
" ;;\n"
" cmp.gtu p6,p7=r9,r28\n"
"(p6) br.cond.sptk.few stacked\n"
" ;;\n"
"static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */\n"
"(p7) br.cond.sptk.few 1f\n"
" ;;\n"
" mov r8=0 /* status = 0 */\n"
" movl r9=0x100000000 /* tc.base */\n"
" movl r10=0x0000000200000003 /* count[0], count[1] */\n"
" movl r11=0x1000000000002000 /* stride[0], stride[1] */\n"
" br.cond.sptk.few rp\n"
"1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r8=0 /* status = 0 */\n"
" movl r9 =0x100000064 /* proc_ratio (1/100) */\n"
" movl r10=0x100000100 /* bus_ratio<<32 (1/256) */\n"
" movl r11=0x100000064 /* itc_ratio<<32 (1/100) */\n"
" ;;\n"
"1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r8=0 /* status = 0 */\n"
" mov r9=96 /* num phys stacked */\n"
" mov r10=0 /* hints */\n"
" mov r11=0\n"
" br.cond.sptk.few rp\n"
"1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r9=ar.lc\n"
" movl r8=524288 /* flush 512k million cache lines (16MB) */\n"
" ;;\n"
" mov ar.lc=r8\n"
" movl r8=0xe000000000000000\n"
" ;;\n"
".loop: fc r8\n"
" add r8=32,r8\n"
" br.cloop.sptk.few .loop\n"
" sync.i\n"
" ;;\n"
" srlz.i\n"
" ;;\n"
" mov ar.lc=r9\n"
" mov r8=r0\n"
" ;;\n"
"1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r8=0 /* status = 0 */\n"
" movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */\n"
" mov r10=0 /* reserved */\n"
" mov r11=0 /* reserved */\n"
" mov r16=0xffff /* implemented PMC */\n"
" mov r17=0xffff /* implemented PMD */\n"
" add r18=8,r29 /* second index */\n"
" ;;\n"
" st8 [r29]=r16,16 /* store implemented PMC */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" ;;\n"
" st8 [r29]=r0,16 /* store implemented PMC */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" ;;\n"
" st8 [r29]=r17,16 /* store implemented PMD */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" mov r16=0xf0 /* cycles count capable PMC */\n"
" ;;\n"
" st8 [r29]=r0,16 /* store implemented PMC */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" mov r17=0x10 /* retired bundles capable PMC */\n"
" ;;\n"
" st8 [r29]=r16,16 /* store cycles capable */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" ;;\n"
" st8 [r29]=r0,16 /* store implemented PMC */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" ;;\n"
" st8 [r29]=r17,16 /* store retired bundle capable */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" ;;\n"
" st8 [r29]=r0,16 /* store implemented PMC */\n"
" st8 [r18]=r0,16 /* clear remaining bits */\n"
" ;;\n"
"1: br.cond.sptk.few rp\n"
"stacked:\n"
" br.ret.sptk.few rp\n"
" .endp pal_emulator_static\n");
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
#define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3)
......@@ -268,14 +145,14 @@ efi_unimplemented (void)
return EFI_UNSUPPORTED;
}
static long
static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
register long r9 asm ("r9") = 0;
register long r10 asm ("r10") = 0;
register long r11 asm ("r11") = 0;
long r9 = 0;
long r10 = 0;
long r11 = 0;
long status;
/*
......@@ -357,8 +234,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
} else {
status = -1;
}
asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
return status;
return ((struct sal_ret_values) {status, r9, r10, r11});
}
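The multi-value return now flows through struct sal_ret_values instead of register-pinned locals. Presumably (its definition lives in <asm/sal.h>, not in this hunk) it is just four longs, which the ia64 ABI returns in r8-r11:

/* Assumed definition, per the ia64 ABI: a small struct returned in r8-r11. */
struct sal_ret_values {
	long r8, r9, r10, r11;
};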
......@@ -427,7 +303,7 @@ sys_fw_init (const char *args, int arglen)
efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");
efi_systab->fw_revision = 1;
efi_systab->runtime = __pa(efi_runtime);
efi_systab->runtime = (void *) __pa(efi_runtime);
efi_systab->nr_tables = 1;
efi_systab->tables = __pa(efi_tables);
......
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*/
#ifndef ssc_h
#define ssc_h
/* Simulator system calls: */
#define SSC_CONSOLE_INIT 20
#define SSC_GETCHAR 21
#define SSC_PUTCHAR 31
#define SSC_OPEN 50
#define SSC_CLOSE 51
#define SSC_READ 52
#define SSC_WRITE 53
#define SSC_GET_COMPLETION 54
#define SSC_WAIT_COMPLETION 55
#define SSC_CONNECT_INTERRUPT 58
#define SSC_GENERATE_INTERRUPT 59
#define SSC_SET_PERIODIC_INTERRUPT 60
#define SSC_GET_RTC 65
#define SSC_EXIT 66
#define SSC_LOAD_SYMBOLS 69
#define SSC_GET_TOD 74
#define SSC_GET_ARGS 75
/*
* Simulator system call.
*/
extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);
#endif /* ssc_h */
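To illustrate how the bootloader drives the simulator through this interface (a sketch, not from the patch; the function name and buffer size are made up), opening and reading the start of the kernel image looks roughly like:

/* Hedged sketch of the SSC console/disk protocol used by boot/bootloader.c. */
static char buffer[1024];

static void
read_first_kb (const char *kpath)
{
	struct disk_req req;
	struct disk_stat stat;
	long fd;

	ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);		/* bring up the simulated console */
	fd = ssc((long) kpath, 1, 0, 0, SSC_OPEN);	/* mode 1 = open for reading */

	req.addr = (unsigned long) buffer;		/* destination of the transfer */
	req.len = sizeof(buffer);
	ssc(fd, 1, (long) &req, 0, SSC_READ);		/* one request, file offset 0 */
	ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);/* block until the read finishes */

	ssc(fd, 0, 0, 0, SSC_CLOSE);
}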
#include <asm/asmmacro.h>
/*
* Simulator system call.
*/
GLOBAL_ENTRY(ia64_ssc)
mov r15=r36
break 0x80001
br.ret.sptk.many rp
END(ia64_ssc)
......@@ -25,19 +25,6 @@
#include "hpsim_ssc.h"
/*
* Simulator system call.
*/
asm (".text\n"
".align 32\n"
".global ia64_ssc\n"
".proc ia64_ssc\n"
"ia64_ssc:\n"
"mov r15=r36\n"
"break 0x80001\n"
"br.ret.sptk.many rp\n"
".endp\n");
void
ia64_ssc_connect_irq (long intr, long irq)
{
......
......@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/compat.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
......@@ -41,6 +42,11 @@
#define __IA32_NR_sigreturn 119
#define __IA32_NR_rt_sigreturn 173
#ifdef ASM_SUPPORTED
/*
* Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and
* restore_ia32_fpstate_live() can be sure the live register contain user-level state.
*/
register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21");
......@@ -50,6 +56,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
#endif
struct sigframe_ia32
{
......@@ -198,30 +205,6 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
* All other fields unused...
*/
#define __ldfe(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldfe %0=[%1] ;;" :"=f"(__f__): "r"(x)); \
})
#define __ldf8(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("ldf8 %0=[%1] ;;" :"=f"(__f__): "r"(x)); \
})
#define __stfe(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define __stf8(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
__asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
static int
save_ia32_fpstate_live (struct _fpstate_ia32 *save)
{
......@@ -238,18 +221,19 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
return -EFAULT;
/* Readin fsr, fcr, fir, fdr and copy onto fpstate */
asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
/* Read in fsr, fcr, fir, fdr and copy onto fpstate */
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
fir = ia64_getreg(_IA64_REG_AR_FIR);
fdr = ia64_getreg(_IA64_REG_AR_FDR);
/*
* We need to clear the exception state before calling the signal handler. Clear
* bit 15 and bits 0-7 in the fp status word, similar to the functionality of the
* fnclex instruction.
*/
new_fsr = fsr & ~0x80ff;
asm volatile ( "mov ar.fsr=%0;" :: "r"(new_fsr));
ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
__put_user(fcr & 0xffff, &save->cw);
__put_user(fsr & 0xffff, &save->sw);
......@@ -286,45 +270,45 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 12);
ia64_stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 13);
ia64_stfe(fpregp, 13);
copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 14);
ia64_stfe(fpregp, 14);
copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 15);
ia64_stfe(fpregp, 15);
copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stf8(&num128[0], 16);
__stf8(&num128[1], 17);
ia64_stf8(&num128[0], 16);
ia64_stf8(&num128[1], 17);
copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 18);
__stf8(&num128[1], 19);
ia64_stf8(&num128[0], 18);
ia64_stf8(&num128[1], 19);
copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 20);
__stf8(&num128[1], 21);
ia64_stf8(&num128[0], 20);
ia64_stf8(&num128[1], 21);
copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 22);
__stf8(&num128[1], 23);
ia64_stf8(&num128[0], 22);
ia64_stf8(&num128[1], 23);
copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 24);
__stf8(&num128[1], 25);
ia64_stf8(&num128[0], 24);
ia64_stf8(&num128[1], 25);
copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 26);
__stf8(&num128[1], 27);
ia64_stf8(&num128[0], 26);
ia64_stf8(&num128[1], 27);
copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 28);
__stf8(&num128[1], 29);
ia64_stf8(&num128[0], 28);
ia64_stf8(&num128[1], 29);
copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
__stf8(&num128[0], 30);
__stf8(&num128[1], 31);
ia64_stf8(&num128[0], 30);
ia64_stf8(&num128[1], 31);
copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
return 0;
}
......@@ -354,10 +338,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
* should remain same while writing.
* So, we do a read, change specific fields and write.
*/
asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
fir = ia64_getreg(_IA64_REG_AR_FIR);
fdr = ia64_getreg(_IA64_REG_AR_FDR);
__get_user(mxcsr, (unsigned int *)&save->mxcsr);
/* setting bits 0..5, 8..12 with cw and 39..47 from mxcsr */
......@@ -391,10 +375,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
num64 = (num64 << 32) | lo;
fdr = (fdr & (~0xffffffffffff)) | num64;
asm volatile ( "mov ar.fsr=%0;" :: "r"(fsr));
asm volatile ( "mov ar.fcr=%0;" :: "r"(fcr));
asm volatile ( "mov ar.fir=%0;" :: "r"(fir));
asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
ia64_setreg(_IA64_REG_AR_FSR, fsr);
ia64_setreg(_IA64_REG_AR_FCR, fcr);
ia64_setreg(_IA64_REG_AR_FIR, fir);
ia64_setreg(_IA64_REG_AR_FDR, fdr);
/*
* restore f8..f11 onto pt_regs
......@@ -420,45 +404,45 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia32f2ia64f(&ptp->f11, fpregp);
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(12, fpregp);
ia64_ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(13, fpregp);
ia64_ldfe(13, fpregp);
copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(14, fpregp);
ia64_ldfe(14, fpregp);
copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(15, fpregp);
ia64_ldfe(15, fpregp);
copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
__ldf8(16, &num128[0]);
__ldf8(17, &num128[1]);
ia64_ldf8(16, &num128[0]);
ia64_ldf8(17, &num128[1]);
copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
__ldf8(18, &num128[0]);
__ldf8(19, &num128[1]);
ia64_ldf8(18, &num128[0]);
ia64_ldf8(19, &num128[1]);
copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
__ldf8(20, &num128[0]);
__ldf8(21, &num128[1]);
ia64_ldf8(20, &num128[0]);
ia64_ldf8(21, &num128[1]);
copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
__ldf8(22, &num128[0]);
__ldf8(23, &num128[1]);
ia64_ldf8(22, &num128[0]);
ia64_ldf8(23, &num128[1]);
copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
__ldf8(24, &num128[0]);
__ldf8(25, &num128[1]);
ia64_ldf8(24, &num128[0]);
ia64_ldf8(25, &num128[1]);
copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
__ldf8(26, &num128[0]);
__ldf8(27, &num128[1]);
ia64_ldf8(26, &num128[0]);
ia64_ldf8(27, &num128[1]);
copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
__ldf8(28, &num128[0]);
__ldf8(29, &num128[1]);
ia64_ldf8(28, &num128[0]);
ia64_ldf8(29, &num128[1]);
copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
__ldf8(30, &num128[0]);
__ldf8(31, &num128[1]);
ia64_ldf8(30, &num128[0]);
ia64_ldf8(31, &num128[1]);
return 0;
}
......@@ -705,7 +689,7 @@ setup_sigcontext_ia32 (struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate
/*
* `eflags' is in an ar register for this context
*/
asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
flag = ia64_getreg(_IA64_REG_AR_EFLAG);
err |= __put_user((unsigned int)flag, &sc->eflags);
err |= __put_user(regs->r12, &sc->esp_at_signal);
err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);
......@@ -790,10 +774,10 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
* IA32 process's context.
*/
err |= __get_user(tmpflags, &sc->eflags);
asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
flag = ia64_getreg(_IA64_REG_AR_EFLAG);
flag &= ~0x40DD5;
flag |= (tmpflags & 0x40DD5);
asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
ia64_setreg(_IA64_REG_AR_EFLAG, flag);
regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
}
......
......@@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/sched.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
......@@ -68,19 +69,11 @@ ia32_load_segment_descriptors (struct task_struct *task)
void
ia32_save_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr;
asm ("mov %0=ar.eflag;"
"mov %1=ar.fsr;"
"mov %2=ar.fcr;"
"mov %3=ar.fir;"
"mov %4=ar.fdr;"
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
t->thread.eflag = eflag;
t->thread.fsr = fsr;
t->thread.fcr = fcr;
t->thread.fir = fir;
t->thread.fdr = fdr;
t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
......@@ -99,12 +92,11 @@ ia32_load_state (struct task_struct *t)
fdr = t->thread.fdr;
tssd = load_desc(_TSS(nr)); /* TSSD */
asm volatile ("mov ar.eflag=%0;"
"mov ar.fsr=%1;"
"mov ar.fcr=%2;"
"mov ar.fir=%3;"
"mov ar.fdr=%4;"
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
ia64_setreg(_IA64_REG_AR_FSR, fsr);
ia64_setreg(_IA64_REG_AR_FCR, fcr);
ia64_setreg(_IA64_REG_AR_FIR, fir);
ia64_setreg(_IA64_REG_AR_FDR, fdr);
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
......@@ -178,7 +170,7 @@ void
ia32_cpu_init (void)
{
/* initialize global ia32 state - CR0 and CR4 */
asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}
static int __init
......
......@@ -14,6 +14,7 @@
#include "ia32priv.h"
#include <asm/intrinsics.h>
#include <asm/ptrace.h>
int
......@@ -93,9 +94,8 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
{
unsigned long fsr, fcr;
asm ("mov %0=ar.fsr;"
"mov %1=ar.fcr;"
: "=r"(fsr), "=r"(fcr));
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
siginfo.si_signo = SIGFPE;
/*
......
......@@ -445,17 +445,19 @@ extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
extern void ia32_load_segment_descriptors (struct task_struct *task);
#define ia32f2ia64f(dst,src) \
do { \
register double f6 asm ("f6"); \
asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
} while(0)
#define ia64f2ia32f(dst,src) \
do { \
register double f6 asm ("f6"); \
asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
} while(0)
#define ia32f2ia64f(dst,src) \
do { \
ia64_ldfe(6,src); \
ia64_stop(); \
ia64_stf_spill(dst, 6); \
} while(0)
#define ia64f2ia32f(dst,src) \
do { \
ia64_ldf_fill(6, src); \
ia64_stop(); \
ia64_stfe(dst, 6); \
} while(0)
struct user_regs_struct32 {
__u32 ebx, ecx, edx, esi, edi, ebp, eax;
......@@ -468,11 +470,8 @@ struct user_regs_struct32 {
};
/* Prototypes for use in elfcore32.h */
int save_ia32_fpstate (struct task_struct *tsk,
struct ia32_user_i387_struct *save);
int save_ia32_fpxstate (struct task_struct *tsk,
struct ia32_user_fxsr_struct *save);
extern int save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save);
extern int save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save);
#endif /* !CONFIG_IA32_SUPPORT */
......
......@@ -51,9 +51,10 @@
#include <linux/compat.h>
#include <linux/vfs.h>
#include <asm/intrinsics.h>
#include <asm/semaphore.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include "ia32priv.h"
......@@ -2192,7 +2193,7 @@ sys32_iopl (int level)
if (level != 3)
return(-EINVAL);
/* Trying to gain more privileges? */
asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
old = ia64_getreg(_IA64_REG_AR_EFLAG);
if ((unsigned int) level > ((old >> 12) & 3)) {
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
......@@ -2216,7 +2217,7 @@ sys32_iopl (int level)
if (addr >= 0) {
old = (old & ~0x3000) | (level << 12);
asm volatile ("mov ar.eflag=%0;;" :: "r"(old));
ia64_setreg(_IA64_REG_AR_EFLAG, old);
}
fput(file);
......
......@@ -471,6 +471,18 @@ GLOBAL_ENTRY(__ia64_syscall)
br.ret.sptk.many rp
END(__ia64_syscall)
GLOBAL_ENTRY(execve)
mov r15=__NR_execve // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp
END(execve)
GLOBAL_ENTRY(clone)
mov r15=__NR_clone // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp
END(clone)
/*
* We invoke syscall_trace through this intermediate function to
* ensure that the syscall input arguments are not clobbered. We
......
......@@ -39,4 +39,4 @@ static union {
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
asm (".global init_task; init_task = init_task_mem");
extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
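Both spellings bind the symbol init_task to the storage of init_task_mem; the attribute form additionally gives the alias a proper C type the compiler can check. The idiom in general (illustrative, not from the patch):

/* Generic GCC alias idiom: expose a second, typed name for existing storage. */
static char internal_buf[64];
extern char public_buf[64] __attribute__ ((alias ("internal_buf")));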
......@@ -495,7 +495,7 @@ iosapic_register_intr (unsigned int gsi,
unsigned long polarity, unsigned long trigger)
{
int vector;
unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
vector = gsi_to_vector(gsi);
if (vector < 0)
......@@ -572,7 +572,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
unsigned long trigger)
{
int vector;
unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
vector = isa_irq_to_vector(isa_irq);
......@@ -666,11 +666,11 @@ iosapic_enable_intr (unsigned int vector)
* Direct the interrupt vector to the current cpu, platform redirection
* will distribute them.
*/
dest = (ia64_get_lid() >> 16) & 0xffff;
dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
}
#else
/* direct the interrupt vector to the running cpu id */
dest = (ia64_get_lid() >> 16) & 0xffff;
dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif
set_rte(vector, dest);
......
......@@ -30,6 +30,7 @@
#include <asm/bitops.h>
#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
......@@ -93,8 +94,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
* because the register and the memory stack are not
* switched atomically.
*/
asm ("mov %0=ar.bsp" : "=r"(bsp));
asm ("mov %0=sp" : "=r"(sp));
bsp = ia64_getreg(_IA64_REG_AR_BSP);
sp = ia64_getreg(_IA64_REG_AR_SP);
if ((sp - bsp) < 1024) {
static unsigned char count;
......@@ -117,11 +118,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
* 16 (without this, it would be ~240, which could easily lead
* to kernel stack overflows).
*/
saved_tpr = ia64_get_tpr();
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
if (!IS_RESCHEDULE(vector)) {
ia64_set_tpr(vector);
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
do_IRQ(local_vector_to_irq(vector), regs);
......@@ -130,7 +131,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
* Disable interrupts and send EOI:
*/
local_irq_disable();
ia64_set_tpr(saved_tpr);
ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
}
ia64_eoi();
vector = ia64_get_ivr();
......@@ -193,7 +194,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
#ifdef CONFIG_SMP
phys_cpu_id = cpu_physical_id(cpu);
#else
phys_cpu_id = (ia64_get_lid() >> 16) & 0xffff;
phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif
/*
......
......@@ -505,14 +505,14 @@ ia64_mca_cmc_vector_setup (void)
cmcv.cmcv_regval = 0;
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
cmcv.cmcv_vector = IA64_CMC_VECTOR;
ia64_set_cmcv(cmcv.cmcv_regval);
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected "
"machine check vector %#x setup and enabled.\n",
smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n",
smp_processor_id(), ia64_get_cmcv());
smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}
/*
......@@ -531,11 +531,11 @@ void
ia64_mca_cmc_vector_disable (void *dummy)
{
cmcv_reg_t cmcv;
cmcv = (cmcv_reg_t)ia64_get_cmcv();
cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
ia64_set_cmcv(cmcv.cmcv_regval);
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
"machine check vector %#x disabled.\n",
......@@ -558,11 +558,11 @@ void
ia64_mca_cmc_vector_enable (void *dummy)
{
cmcv_reg_t cmcv;
cmcv = (cmcv_reg_t)ia64_get_cmcv();
cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
ia64_set_cmcv(cmcv.cmcv_regval);
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
"machine check vector %#x enabled.\n",
......@@ -727,10 +727,10 @@ ia64_mca_init(void)
/* Register the os init handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
ia64_tpa(ia64_get_gp()),
ia64_tpa(ia64_getreg(_IA64_REG_GP)),
ia64_mc_info.imi_monarch_init_handler_size,
ia64_mc_info.imi_slave_init_handler,
ia64_tpa(ia64_get_gp()),
ia64_tpa(ia64_getreg(_IA64_REG_GP)),
ia64_mc_info.imi_slave_init_handler_size)))
{
printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
......@@ -816,16 +816,16 @@ ia64_mca_wakeup_ipi_wait(void)
do {
switch(irr_num) {
case 0:
irr = ia64_get_irr0();
irr = ia64_getreg(_IA64_REG_CR_IRR0);
break;
case 1:
irr = ia64_get_irr1();
irr = ia64_getreg(_IA64_REG_CR_IRR1);
break;
case 2:
irr = ia64_get_irr2();
irr = ia64_getreg(_IA64_REG_CR_IRR2);
break;
case 3:
irr = ia64_get_irr3();
irr = ia64_getreg(_IA64_REG_CR_IRR3);
break;
}
} while (!(irr & (1 << irr_bit))) ;
......@@ -1146,7 +1146,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
ia64_mca_cmc_int_handler(cpe_irq, arg, ptregs);
for (++cpuid ; !cpu_online(cpuid) && cpuid < NR_CPUS ; cpuid++);
if (cpuid < NR_CPUS) {
platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
} else {
......@@ -1176,7 +1176,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
start_count = -1;
}
return IRQ_HANDLED;
}
......
......@@ -39,6 +39,7 @@
#include <asm/bitops.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
......@@ -671,39 +672,45 @@ static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
__asm__ __volatile__ ("rsm psr.pp;; srlz.i;;"::: "memory");
ia64_rsm(IA64_PSR_PP);
ia64_srlz_i();
}
static inline void
pfm_set_psr_pp(void)
{
__asm__ __volatile__ ("ssm psr.pp;; srlz.i;;"::: "memory");
ia64_ssm(IA64_PSR_PP);
ia64_srlz_i();
}
static inline void
pfm_clear_psr_up(void)
{
__asm__ __volatile__ ("rsm psr.up;; srlz.i;;"::: "memory");
ia64_rsm(IA64_PSR_UP);
ia64_srlz_i();
}
static inline void
pfm_set_psr_up(void)
{
__asm__ __volatile__ ("ssm psr.up;; srlz.i;;"::: "memory");
ia64_ssm(IA64_PSR_UP);
ia64_srlz_i();
}
static inline unsigned long
pfm_get_psr(void)
{
unsigned long tmp;
__asm__ __volatile__ ("mov %0=psr;;": "=r"(tmp) :: "memory");
tmp = ia64_getreg(_IA64_REG_PSR);
ia64_srlz_i();
return tmp;
}
static inline void
pfm_set_psr_l(unsigned long val)
{
__asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;"::"r"(val): "memory");
ia64_setreg(_IA64_REG_PSR_L, val);
ia64_srlz_i();
}
static inline void
......@@ -970,7 +977,7 @@ pfm_restore_monitoring(struct task_struct *task)
*/
if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
/* disable dcr pp */
ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
pfm_clear_psr_pp();
} else {
pfm_clear_psr_up();
......@@ -1017,7 +1024,7 @@ pfm_restore_monitoring(struct task_struct *task)
*/
if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
/* enable dcr pp */
ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i();
}
pfm_set_psr_l(psr);
......@@ -1773,7 +1780,7 @@ pfm_syswide_force_stop(void *info)
/*
* Update local PMU
*/
ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
ia64_srlz_i();
/*
* update local cpuinfo
......@@ -3944,7 +3951,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*
* disable dcr pp
*/
ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
ia64_srlz_i();
/*
......@@ -4034,7 +4041,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_set_psr_pp();
/* enable dcr pp */
ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i();
return 0;
......@@ -4199,7 +4206,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
current->pid,
thread->pfm_context, ctx));
old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
if (old != NULL) {
DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
goto error_unres;
......@@ -5459,13 +5466,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
* if monitoring has started
*/
if (dcr_pp) {
dcr = ia64_get_dcr();
dcr = ia64_getreg(_IA64_REG_CR_DCR);
/*
* context switching in?
*/
if (is_ctxswin) {
/* mask monitoring for the idle task */
ia64_set_dcr(dcr & ~IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
pfm_clear_psr_pp();
ia64_srlz_i();
return;
......@@ -5477,7 +5484,7 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
* Due to inlining, this odd if-then-else construction generates
* better code.
*/
ia64_set_dcr(dcr |IA64_DCR_PP);
ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
pfm_set_psr_pp();
ia64_srlz_i();
}
......@@ -6257,7 +6264,7 @@ pfm_init_percpu (void)
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR);
ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
ia64_srlz_d();
/*
......
......@@ -741,8 +741,8 @@ cpu_init (void)
* shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
* be fine).
*/
ia64_set_dcr( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
if (current->mm)
......@@ -758,11 +758,11 @@ cpu_init (void)
ia64_set_itv(1 << 16);
ia64_set_lrr0(1 << 16);
ia64_set_lrr1(1 << 16);
ia64_set_pmv(1 << 16);
ia64_set_cmcv(1 << 16);
ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
/* clear TPR & XTP to enable all interrupt classes: */
ia64_set_tpr(0);
ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
normal_xtp();
#endif
......
/*
* Architecture-specific signal handling support.
*
* Copyright (C) 1999-2002 Hewlett-Packard Co
* Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Derived from i386 and Alpha versions.
......@@ -23,6 +23,7 @@
#include <linux/wait.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
......@@ -41,6 +42,12 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
#ifdef ASM_SUPPORTED
/*
* Don't let GCC use f16-f31, so that when we set up/restore the registers in the signal
* context in __kernel_sigtramp(), we can be sure that registers f16-f31 contain user-level
* values.
*/
register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21");
......@@ -50,6 +57,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
#endif
long
ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
......@@ -192,7 +200,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value);
err |= __put_user(from->si_ptr, &to->si_ptr);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
......@@ -592,10 +600,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
if (IS_IA32_PROCESS(&scr->pt)) {
scr->pt.r8 = scr->pt.r1;
scr->pt.cr_iip -= 2;
if (errno == ERESTART_RESTARTBLOCK) {
if (errno == ERESTART_RESTARTBLOCK)
scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */
scr->pt.cr_iip -= 2;
}
} else {
/*
* Note: the syscall number is in r15 which is saved in
......
......@@ -7,6 +7,20 @@
* 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <asm/fpswa.h>
#include <asm/hardirq.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
/*
* fp_emulate() needs to be able to access and update all floating point registers. Those
* saved in pt_regs can be accessed through that structure, but those not saved will be
......@@ -15,6 +29,7 @@
* by declaring preserved registers that are not marked as "fixed" as global register
* variables.
*/
#ifdef ASM_SUPPORTED
register double f2 asm ("f2"); register double f3 asm ("f3");
register double f4 asm ("f4"); register double f5 asm ("f5");
......@@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <asm/hardirq.h>
#include <asm/ia32.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/fpswa.h>
#endif
extern spinlock_t timerlist_lock;
......@@ -357,6 +359,10 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x11) {
siginfo.si_code = FPE_FLTINV;
} else if (isr & 0x22) {
/* denormal operand gets the same si_code as underflow
* see arch/i386/kernel/traps.c:math_error() */
siginfo.si_code = FPE_FLTUND;
} else if (isr & 0x44) {
siginfo.si_code = FPE_FLTDIV;
}
......
......@@ -18,9 +18,10 @@
#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
......@@ -231,7 +232,7 @@ static u16 fr_info[32]={
static void
invala_gr (int regno)
{
# define F(reg) case reg: __asm__ __volatile__ ("invala.e r%0" :: "i"(reg)); break
# define F(reg) case reg: ia64_invala_gr(reg); break
switch (regno) {
F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
......@@ -258,7 +259,7 @@ invala_gr (int regno)
static void
invala_fr (int regno)
{
# define F(reg) case reg: __asm__ __volatile__ ("invala.e f%0" :: "i"(reg)); break
# define F(reg) case reg: ia64_invala_fr(reg); break
switch (regno) {
F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
......@@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
static inline void
float_spill_f0 (struct ia64_fpreg *final)
{
__asm__ __volatile__ ("stf.spill [%0]=f0" :: "r"(final) : "memory");
ia64_stf_spill(final, 0);
}
static inline void
float_spill_f1 (struct ia64_fpreg *final)
{
__asm__ __volatile__ ("stf.spill [%0]=f1" :: "r"(final) : "memory");
ia64_stf_spill(final, 1);
}
static void
......@@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={
static inline void
mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldfe f6=[%0];; stf.spill [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldfe(6, init);
ia64_stop();
ia64_stf_spill(final, 6);
}
static inline void
mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldf8 f6=[%0];; stf.spill [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldf8(6, init);
ia64_stop();
ia64_stf_spill(final, 6);
}
static inline void
mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldfs f6=[%0];; stf.spill [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldfs(6, init);
ia64_stop();
ia64_stf_spill(final, 6);
}
static inline void
mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldfd f6=[%0];; stf.spill [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldfd(6, init);
ia64_stop();
ia64_stf_spill(final, 6);
}
static inline void
float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldf.fill f6=[%0];; stfe [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldf_fill(6, init);
ia64_stop();
ia64_stfe(final, 6);
}
static inline void
float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldf.fill f6=[%0];; stf8 [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldf_fill(6, init);
ia64_stop();
ia64_stf8(final, 6);
}
static inline void
float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldf.fill f6=[%0];; stfs [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldf_fill(6, init);
ia64_stop();
ia64_stfs(final, 6);
}
static inline void
float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
__asm__ __volatile__ ("ldf.fill f6=[%0];; stfd [%1]=f6"
:: "r"(init), "r"(final) : "f6","memory");
ia64_ldf_fill(6, init);
ia64_stop();
ia64_stfd(final, 6);
}
static int
......
......@@ -35,6 +35,7 @@ SECTIONS
{
*(.text.ivt)
*(.text)
*(.gnu.linkonce.t*)
}
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
......@@ -183,7 +184,7 @@ SECTIONS
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
.data : AT(ADDR(.data) - LOAD_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
{ *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
......@@ -194,7 +195,7 @@ SECTIONS
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) }
{ *(.sdata) *(.sdata1) *(.srdata) }
_edata = .;
_bss = .;
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
......
......@@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi
/*
* Flush ALAT entries also.
*/
asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2)
: "memory");
ia64_ptcga(start, (nbits<<2));
ia64_srlz_i();
start += (1UL << nbits);
} while (start < end);
}
......@@ -118,15 +118,13 @@ local_flush_tlb_all (void)
local_irq_save(flags);
for (i = 0; i < count0; ++i) {
for (j = 0; j < count1; ++j) {
asm volatile ("ptc.e %0" :: "r"(addr));
ia64_ptce(addr);
addr += stride1;
}
addr += stride0;
}
local_irq_restore(flags);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
}
void
......@@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
platform_global_tlb_purge(start, end, nbits);
# else
do {
asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
ia64_ptcl(start, (nbits<<2));
start += (1UL << nbits);
} while (start < end);
# endif
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
}
void __init
......
......@@ -200,7 +200,7 @@ efi_unimplemented (void)
#ifdef SGI_SN2
#undef cpu_physical_id
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff)
#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
void
fprom_send_cpei(void) {
......@@ -224,14 +224,14 @@ fprom_send_cpei(void) {
#endif
static long
static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
register long r9 asm ("r9") = 0;
register long r10 asm ("r10") = 0;
register long r11 asm ("r11") = 0;
long r9 = 0;
long r10 = 0;
long r11 = 0;
long status;
/*
......@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
}
asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
return status;
return ((struct sal_ret_values) {status, r9, r10, r11});
}
......
......@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) {
irr_bit = irq_to_vector(irq) % 64;
switch (irr_reg_num) {
case 0:
irr_reg = ia64_get_irr0();
irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
break;
case 1:
irr_reg = ia64_get_irr1();
irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
break;
case 2:
irr_reg = ia64_get_irr2();
irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
break;
case 3:
irr_reg = ia64_get_irr3();
irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
break;
}
if (!test_bit(irr_bit, &irr_reg) ) {
......@@ -354,9 +354,9 @@ sn_get_next_bit(void) {
void
sn_set_tpr(int vector) {
if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
ia64_set_tpr(vector);
ia64_setreg(_IA64_REG_CR_TPR, vector);
} else {
ia64_set_tpr(IA64_LAST_DEVICE_VECTOR);
ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR);
}
}
......
......@@ -395,7 +395,7 @@ sn_cpu_init(void)
return;
cpuid = smp_processor_id();
cpuphyid = ((ia64_get_lid() >> 16) & 0xffff);
cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff);
nasid = cpu_physical_id_to_nasid(cpuphyid);
cnode = nasid_to_cnodeid(nasid);
slice = cpu_physical_id_to_slice(cpuphyid);
......
......@@ -11,81 +11,73 @@
#include <asm/sn/sn2/io.h>
#undef __sn_inb
#undef __sn_inw
#undef __sn_inl
#undef __sn_outb
#undef __sn_outw
#undef __sn_outl
#undef __sn_readb
#undef __sn_readw
#undef __sn_readl
#undef __sn_readq
unsigned int
sn_inb (unsigned long port)
__sn_inb (unsigned long port)
{
return __sn_inb(port);
return ___sn_inb(port);
}
unsigned int
sn_inw (unsigned long port)
__sn_inw (unsigned long port)
{
return __sn_inw(port);
return ___sn_inw(port);
}
unsigned int
sn_inl (unsigned long port)
__sn_inl (unsigned long port)
{
return __sn_inl(port);
return ___sn_inl(port);
}
void
sn_outb (unsigned char val, unsigned long port)
__sn_outb (unsigned char val, unsigned long port)
{
__sn_outb(val, port);
___sn_outb(val, port);
}
void
sn_outw (unsigned short val, unsigned long port)
__sn_outw (unsigned short val, unsigned long port)
{
__sn_outw(val, port);
___sn_outw(val, port);
}
void
sn_outl (unsigned int val, unsigned long port)
__sn_outl (unsigned int val, unsigned long port)
{
__sn_outl(val, port);
___sn_outl(val, port);
}
unsigned char
sn_readb (void *addr)
__sn_readb (void *addr)
{
return __sn_readb (addr);
return ___sn_readb (addr);
}
unsigned short
sn_readw (void *addr)
__sn_readw (void *addr)
{
return __sn_readw (addr);
return ___sn_readw (addr);
}
unsigned int
sn_readl (void *addr)
__sn_readl (void *addr)
{
return __sn_readl (addr);
return ___sn_readl (addr);
}
unsigned long
sn_readq (void *addr)
__sn_readq (void *addr)
{
return __sn_readq (addr);
return ___sn_readq (addr);
}
/* define aliases: */
asm (".global __sn_inb, __sn_inw, __sn_inl");
asm ("__sn_inb = sn_inb");
asm ("__sn_inw = sn_inw");
asm ("__sn_inl = sn_inl");
asm (".global __sn_outb, __sn_outw, __sn_outl");
asm ("__sn_outb = sn_outb");
asm ("__sn_outw = sn_outw");
asm ("__sn_outl = sn_outl");
asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
asm ("__sn_readb = sn_readb");
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
......@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
......@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
......@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
......@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
......
......@@ -292,7 +292,7 @@ ffz (unsigned long x)
{
unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1)));
result = ia64_popcnt(x & (~x - 1));
return result;
}
......@@ -307,7 +307,7 @@ __ffs (unsigned long x)
{
unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x));
result = ia64_popcnt((x-1) & ~x);
return result;
}
......@@ -323,7 +323,7 @@ ia64_fls (unsigned long x)
long double d = x;
long exp;
__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
exp = ia64_getf_exp(d);
return exp - 0xffff;
}
......@@ -349,7 +349,7 @@ static __inline__ unsigned long
hweight64 (unsigned long x)
{
unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x));
result = ia64_popcnt(x);
return result;
}
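All of these helpers lean on the same trick: build a mask of the bits below the bit of interest, then count them. A standalone sketch for ffz(), using the portable __builtin_popcountl as a stand-in for the ia64 popcnt instruction:

/* Why popcnt(x & (~x - 1)) is the index of the lowest 0-bit of x (x != ~0UL):
 * every bit of x below its lowest zero is 1, and (~x - 1) has ones exactly in
 * those positions, so the AND isolates them and popcnt counts them. */
unsigned long
ffz_demo (unsigned long x)
{
	return __builtin_popcountl(x & (~x - 1));
}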
......
......@@ -7,13 +7,14 @@
*/
#include <asm/types.h>
#include <asm/intrinsics.h>
static __inline__ __const__ __u64
__ia64_swab64 (__u64 x)
{
__u64 result;
__asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x));
result = ia64_mux1(x, ia64_mux1_rev);
return result;
}
......
......@@ -6,8 +6,12 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/* In kernel mode, thread pointer (r13) is used to point to the
current task structure. */
register struct task_struct *current asm ("r13");
#include <asm/intrinsics.h>
/*
* In kernel mode, thread pointer (r13) is used to point to the current task
* structure.
*/
#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
#endif /* _ASM_IA64_CURRENT_H */
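A quick illustration (not from the patch): users of current are unaffected; the macro simply routes the r13 read through the intrinsic, which also works for compilers that cannot declare global register variables:

/* current->pid now expands to
 * ((struct task_struct *) ia64_getreg(_IA64_REG_TP))->pid */
static inline int
current_pid_demo (void)
{
	return current->pid;
}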
......@@ -5,7 +5,7 @@
* Delay routines using a pre-computed "cycles/usec" value.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
......@@ -17,12 +17,14 @@
#include <linux/sched.h>
#include <linux/compiler.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
static __inline__ void
ia64_set_itm (unsigned long val)
{
__asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory");
ia64_setreg(_IA64_REG_CR_ITM, val);
ia64_srlz_d();
}
static __inline__ unsigned long
......@@ -30,20 +32,23 @@ ia64_get_itm (void)
{
unsigned long result;
__asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory");
result = ia64_getreg(_IA64_REG_CR_ITM);
ia64_srlz_d();
return result;
}
static __inline__ void
ia64_set_itv (unsigned long val)
{
__asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory");
ia64_setreg(_IA64_REG_CR_ITV, val);
ia64_srlz_d();
}
static __inline__ void
ia64_set_itc (unsigned long val)
{
__asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
ia64_setreg(_IA64_REG_AR_ITC, val);
ia64_srlz_d();
}
static __inline__ unsigned long
......@@ -51,10 +56,13 @@ ia64_get_itc (void)
{
unsigned long result;
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
#ifdef CONFIG_ITANIUM
while (unlikely((__s32) result == -1))
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
while (unlikely((__s32) result == -1)) {
result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
}
#endif
return result;
}
......@@ -62,15 +70,11 @@ ia64_get_itc (void)
static __inline__ void
__delay (unsigned long loops)
{
unsigned long saved_ar_lc;
if (loops < 1)
return;
__asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc));
__asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1));
__asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
__asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
while (loops--)
ia64_nop(0);
}
static __inline__ void
......
/*
* Copyright (C) 2002,2003 Intel Corp.
* Jun Nakajima <jun.nakajima@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
*/
#ifndef _ASM_IA64_IA64REGS_H
#define _ASM_IA64_IA64REGS_H
/*
* Register Names for getreg() and setreg().
*
* The "magic" numbers happen to match the values used by the Intel compiler's
* getreg()/setreg() intrinsics.
*/
/* Special Registers */
#define _IA64_REG_IP 1016 /* getreg only */
#define _IA64_REG_PSR 1019
#define _IA64_REG_PSR_L 1019
/* General Integer Registers */
#define _IA64_REG_GP 1025 /* R1 */
#define _IA64_REG_R8 1032 /* R8 */
#define _IA64_REG_R9 1033 /* R9 */
#define _IA64_REG_SP 1036 /* R12 */
#define _IA64_REG_TP 1037 /* R13 */
/* Application Registers */
#define _IA64_REG_AR_KR0 3072
#define _IA64_REG_AR_KR1 3073
#define _IA64_REG_AR_KR2 3074
#define _IA64_REG_AR_KR3 3075
#define _IA64_REG_AR_KR4 3076
#define _IA64_REG_AR_KR5 3077
#define _IA64_REG_AR_KR6 3078
#define _IA64_REG_AR_KR7 3079
#define _IA64_REG_AR_RSC 3088
#define _IA64_REG_AR_BSP 3089
#define _IA64_REG_AR_BSPSTORE 3090
#define _IA64_REG_AR_RNAT 3091
#define _IA64_REG_AR_FCR 3093
#define _IA64_REG_AR_EFLAG 3096
#define _IA64_REG_AR_CSD 3097
#define _IA64_REG_AR_SSD 3098
#define _IA64_REG_AR_CFLAG 3099
#define _IA64_REG_AR_FSR 3100
#define _IA64_REG_AR_FIR 3101
#define _IA64_REG_AR_FDR 3102
#define _IA64_REG_AR_CCV 3104
#define _IA64_REG_AR_UNAT 3108
#define _IA64_REG_AR_FPSR 3112
#define _IA64_REG_AR_ITC 3116
#define _IA64_REG_AR_PFS 3136
#define _IA64_REG_AR_LC 3137
#define _IA64_REG_AR_EC 3138
/* Control Registers */
#define _IA64_REG_CR_DCR 4096
#define _IA64_REG_CR_ITM 4097
#define _IA64_REG_CR_IVA 4098
#define _IA64_REG_CR_PTA 4104
#define _IA64_REG_CR_IPSR 4112
#define _IA64_REG_CR_ISR 4113
#define _IA64_REG_CR_IIP 4115
#define _IA64_REG_CR_IFA 4116
#define _IA64_REG_CR_ITIR 4117
#define _IA64_REG_CR_IIPA 4118
#define _IA64_REG_CR_IFS 4119
#define _IA64_REG_CR_IIM 4120
#define _IA64_REG_CR_IHA 4121
#define _IA64_REG_CR_LID 4160
#define _IA64_REG_CR_IVR 4161 /* getreg only */
#define _IA64_REG_CR_TPR 4162
#define _IA64_REG_CR_EOI 4163
#define _IA64_REG_CR_IRR0 4164 /* getreg only */
#define _IA64_REG_CR_IRR1 4165 /* getreg only */
#define _IA64_REG_CR_IRR2 4166 /* getreg only */
#define _IA64_REG_CR_IRR3 4167 /* getreg only */
#define _IA64_REG_CR_ITV 4168
#define _IA64_REG_CR_PMV 4169
#define _IA64_REG_CR_CMCV 4170
#define _IA64_REG_CR_LRR0 4176
#define _IA64_REG_CR_LRR1 4177
/* Indirect Registers for getindreg() and setindreg() */
#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
#define _IA64_REG_INDR_DBR 9001
#define _IA64_REG_INDR_IBR 9002
#define _IA64_REG_INDR_PKR 9003
#define _IA64_REG_INDR_PMC 9004
#define _IA64_REG_INDR_PMD 9005
#define _IA64_REG_INDR_RR 9006
#endif /* _ASM_IA64_IA64REGS_H */
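The numbering above encodes the register class: 1016+ for the special registers, 1025+ for the general registers, 3072+ for application registers, 4096+ for control registers, and 9000+ for the indirect register files. A small sketch (not part of the header) that classifies a register number by those ranges:

/* boundaries read off the constants above, not from an architecture spec */
static const char *reg_class(int reg)
{
	if (reg >= 9000) return "indirect (cpuid/dbr/ibr/pkr/pmc/pmd/rr)";
	if (reg >= 4096) return "control register (cr)";
	if (reg >= 3072) return "application register (ar)";
	if (reg >= 1025) return "general register";
	return "special (ip/psr)";
}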
......@@ -8,8 +8,17 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef __ASSEMBLY__
#include <linux/config.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif
/*
* Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value.
......@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \
switch (sz) { \
case 4: \
__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
break; \
\
case 8: \
__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
break; \
\
default: \
......@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
(__typeof__(*(v))) (_tmp); /* return old value */ \
})
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
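Note why the last argument changed from the string "rel" to the bare token rel: the macro now pastes it into an intrinsic name with ##sem, so ia64_fetchadd(1, v, rel) selects ia64_fetchadd4_rel() at preprocessing time. The same pattern in a self-contained sketch, with GCC __atomic builtins standing in for the IA-64 intrinsics:

#include <stdio.h>

static int fetchadd4_acq(int *p, int n) { return __atomic_fetch_add(p, n, __ATOMIC_ACQUIRE); }
static int fetchadd4_rel(int *p, int n) { return __atomic_fetch_add(p, n, __ATOMIC_RELEASE); }

#define fetchadd(n, p, sem)	fetchadd4_##sem(p, n)	/* sem must be a bare token */

int main(void)
{
	int v = 5;
	int old = fetchadd(1, &v, rel);		/* expands to fetchadd4_rel(&v, 1) */

	printf("old=%d new=%d\n", old, v);	/* old=5 new=6 */
	(void)fetchadd(1, &v, acq);		/* acq variant, same pasting */
	return 0;
}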
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalid xchg().
*/
extern void __xchg_called_with_bad_pointer (void);
static __inline__ unsigned long
__xchg (unsigned long x, volatile void *ptr, int size)
{
unsigned long result;
switch (size) {
case 1:
__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result)
: "r" (ptr), "r" (x) : "memory");
return result;
case 2:
__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result)
: "r" (ptr), "r" (x) : "memory");
return result;
case 4:
__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result)
: "r" (ptr), "r" (x) : "memory");
return result;
case 8:
__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result)
: "r" (ptr), "r" (x) : "memory");
return result;
}
__xchg_called_with_bad_pointer();
return x;
}
extern void ia64_xchg_called_with_bad_pointer (void);
#define __xchg(x,ptr,size) \
({ \
unsigned long __xchg_result; \
\
switch (size) { \
case 1: \
__xchg_result = ia64_xchg1((__u8 *)ptr, x); \
break; \
\
case 2: \
__xchg_result = ia64_xchg2((__u16 *)ptr, x); \
break; \
\
case 4: \
__xchg_result = ia64_xchg4((__u32 *)ptr, x); \
break; \
\
case 8: \
__xchg_result = ia64_xchg8((__u64 *)ptr, x); \
break; \
default: \
ia64_xchg_called_with_bad_pointer(); \
} \
__xchg_result; \
})
#define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
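__xchg() atomically stores the new value and hands back the old one, dispatching on operand size exactly as before, only through the ia64_xchgN intrinsics instead of inline asm. The classic use is a test-and-set lock; a hedged sketch with __atomic_exchange_n in place of ia64_xchg4:

static volatile unsigned int lock_word;

static void toy_lock(void)
{
	/* old value 0 means we flipped it and now own the lock */
	while (__atomic_exchange_n(&lock_word, 1u, __ATOMIC_ACQUIRE) != 0)
		;	/* spin: someone else got there first */
}

static void toy_unlock(void)
{
	__atomic_store_n(&lock_word, 0u, __ATOMIC_RELEASE);
}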
......@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size)
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern long __cmpxchg_called_with_bad_pointer(void);
extern long ia64_cmpxchg_called_with_bad_pointer (void);
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
__typeof__(ptr) _p_ = (ptr); \
__typeof__(new) _n_ = (new); \
__u64 _o_, _r_; \
\
switch (size) { \
......@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void);
case 8: _o_ = (__u64) (long) (old); break; \
default: break; \
} \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
switch (size) { \
case 1: \
__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
break; \
\
case 2: \
__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
break; \
\
case 4: \
__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
break; \
\
case 8: \
__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
break; \
\
default: \
_r_ = __cmpxchg_called_with_bad_pointer(); \
_r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \
} \
(__typeof__(old)) _r_; \
})
#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
......@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
if (_cmpxchg_bugcheck_count-- <= 0) { \
void *ip; \
extern int printk(const char *fmt, ...); \
asm ("mov %0=ip" : "=r"(ip)); \
ip = ia64_getreg(_IA64_REG_IP); \
printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
break; \
} \
......@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void);
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
#endif
#endif /* _ASM_IA64_INTRINSICS_H */
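cmpxchg_acq/cmpxchg_rel now pass acq/rel as bare tokens for the same ##sem pasting reason as ia64_fetchadd, and the usual consumer is a read-modify-write retry loop, which is exactly the shape CMPXCHG_BUGCHECK polices under CONFIG_IA64_DEBUG_CMPXCHG. A minimal sketch of such a loop, using __atomic_compare_exchange_n for the cmpxchg step:

static unsigned long set_bits_atomic(unsigned long *word, unsigned long bits)
{
	unsigned long old = __atomic_load_n(word, __ATOMIC_RELAXED);

	/* on failure, old is refreshed with the current value and we retry;
	 * this is the loop CMPXCHG_BUGCHECK would count iterations of */
	while (!__atomic_compare_exchange_n(word, &old, old | bits,
					    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		;
	return old;	/* previous value, as cmpxchg_acq() would return */
}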
......@@ -52,6 +52,7 @@ extern unsigned int num_io_spaces;
# ifdef __KERNEL__
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
......@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address)
* Memory fence w/accept. This should never be used in code that is
* not IA-64 specific.
*/
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
#define __ia64_mf_a() ia64_mfa()
static inline const unsigned long
__ia64_get_io_port_base (void)
......
......@@ -155,7 +155,7 @@ struct ia64_machine_vector {
ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
};
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
{ \
......
......@@ -158,9 +158,7 @@ reload_context (mm_context_t context)
ia64_set_rr(0x4000000000000000, rr2);
ia64_set_rr(0x6000000000000000, rr3);
ia64_set_rr(0x8000000000000000, rr4);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
}
static inline void
......
......@@ -9,6 +9,7 @@
#include <linux/config.h>
#include <asm/intrinsics.h>
#include <asm/types.h>
/*
......@@ -143,7 +144,7 @@ get_order (unsigned long size)
double d = size - 1;
long order;
__asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d));
order = ia64_getf_exp(d);
order = order - PAGE_SHIFT - 0xffff + 1;
if (order < 0)
order = 0;
......
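ia64_getf_exp() reads the biased exponent field of the double, so for size - 1 it yields floor(log2(size - 1)) + 0xffff; subtracting PAGE_SHIFT and the 0xffff bias and adding 1 gives the smallest order such that 2^order pages cover size. A portable sketch of the same computation with bit scanning (EX_PAGE_SHIFT 13 and a 64-bit long are assumptions of the example, not values from this hunk):

#include <stdio.h>

#define EX_PAGE_SHIFT 13	/* example: 8 KB pages */

static long toy_get_order(unsigned long size)
{
	long order;

	if (size < 2)
		return 0;
	order = 63 - __builtin_clzl(size - 1);	/* floor(log2(size - 1)) */
	order = order - EX_PAGE_SHIFT + 1;
	return order < 0 ? 0 : order;
}

int main(void)
{
	/* five 8 KB pages round up to order 3, i.e. eight pages */
	printf("%ld\n", toy_get_order(5 * 8192));	/* prints 3 */
	return 0;
}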
......@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector
/* Initialize the processor controlled caches */
static inline s64
ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict)
ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
{
struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict);
PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
return iprv.status;
}
......
This diff is collapsed.
......@@ -23,6 +23,8 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/intrinsics.h>
/*
* the semaphore definition
*/
......@@ -81,9 +83,8 @@ init_rwsem (struct rw_semaphore *sem)
static inline void
__down_read (struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
"=r"(result) : "r"(&sem->count) : "memory");
int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
if (result < 0)
rwsem_down_read_failed(sem);
}
......@@ -111,9 +112,8 @@ __down_write (struct rw_semaphore *sem)
static inline void
__up_read (struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
"=r"(result) : "r"(&sem->count) : "memory");
int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
......
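The fast paths above key off the sign of the fetchadd result: readers add 1 with acquire semantics on the way in and subtract 1 with release semantics on the way out, and a negative count means a writer is active or queued, so the slow path (rwsem_down_read_failed/rwsem_wake) runs. A hedged sketch of that counter protocol, with __atomic builtins in place of the ia64_fetchadd4_* intrinsics:

static int down_read_fast(int *count)
{
	/* like ia64_fetchadd4_acq(count, 1): returns the pre-increment value */
	return __atomic_fetch_add(count, 1, __ATOMIC_ACQUIRE) >= 0;	/* 1 = lock taken */
}

static int up_read_fast(int *count)
{
	/* like ia64_fetchadd4_rel(count, -1); caller checks < 0 for waiters */
	return __atomic_fetch_add(count, -1, __ATOMIC_RELEASE);
}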
......@@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
extern unsigned long sal_platform_features;
struct sal_ret_values {
long r8; long r9; long r10; long r11;
};
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_PAL_H */
......@@ -79,7 +79,6 @@ typedef struct siginfo {
* si_code is non-zero and __ISR_VALID is set in si_flags.
*/
#define si_isr _sifields._sigfault._isr
#define si_pfm_ovfl _sifields._sigprof._pfm_ovfl_counters
/*
* Flag values for si_flags:
......
......@@ -106,7 +106,7 @@ hard_smp_processor_id (void)
unsigned long bits;
} lid;
lid.bits = ia64_get_lid();
lid.bits = ia64_getreg(_IA64_REG_CR_LID);
return lid.f.id << 8 | lid.f.eid;
}
......
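hard_smp_processor_id() folds the id and eid fields of cr.lid into one 16-bit value. The same decode done with shifts instead of the bitfield struct (the exact bit positions, eid at 16-23 and id at 24-31, are stated from the architecture, not from this hunk):

static unsigned int decode_lid(unsigned long lid_bits)
{
	unsigned int eid = (lid_bits >> 16) & 0xff;
	unsigned int id  = (lid_bits >> 24) & 0xff;

	return (id << 8) | eid;	/* same value hard_smp_processor_id() builds */
}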
......@@ -11,11 +11,23 @@
extern void * sn_io_addr(unsigned long port); /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */
#include <asm/intrinsics.h>
#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
#define __sn_mf_a() ia64_mfa()
extern void sn_dma_flush(unsigned long);
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
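The triple-underscore renames plus these defines keep both spellings usable: callers that expand the macro reach the inline body, while machine-vector tables can still take the address of the real ___sn_* function. The pattern in isolation (toy names, not SN code):

static inline int ___toy_read(int port)
{
	return port + 1;	/* stands in for the real inline body */
}
#define __toy_read ___toy_read

int toy_user(void)
{
	return __toy_read(7);	/* expands to ___toy_read(7) */
}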
/*
* The following routines are SN Platform specific, called when
* a reference is made to inX/outX set macros. SN Platform
......@@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long);
*/
static inline unsigned int
__sn_inb (unsigned long port)
___sn_inb (unsigned long port)
{
volatile unsigned char *addr;
unsigned char ret = -1;
......@@ -40,7 +52,7 @@ __sn_inb (unsigned long port)
}
static inline unsigned int
__sn_inw (unsigned long port)
___sn_inw (unsigned long port)
{
volatile unsigned short *addr;
unsigned short ret = -1;
......@@ -54,7 +66,7 @@ __sn_inw (unsigned long port)
}
static inline unsigned int
__sn_inl (unsigned long port)
___sn_inl (unsigned long port)
{
volatile unsigned int *addr;
unsigned int ret = -1;
......@@ -68,7 +80,7 @@ __sn_inl (unsigned long port)
}
static inline void
__sn_outb (unsigned char val, unsigned long port)
___sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr;
......@@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port)
}
static inline void
__sn_outw (unsigned short val, unsigned long port)
___sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr;
......@@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port)
}
static inline void
__sn_outl (unsigned int val, unsigned long port)
___sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr;
......@@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port)
*/
static inline unsigned char
__sn_readb (void *addr)
___sn_readb (void *addr)
{
unsigned char val;
......@@ -121,7 +133,7 @@ __sn_readb (void *addr)
}
static inline unsigned short
__sn_readw (void *addr)
___sn_readw (void *addr)
{
unsigned short val;
......@@ -132,7 +144,7 @@ __sn_readw (void *addr)
}
static inline unsigned int
__sn_readl (void *addr)
___sn_readl (void *addr)
{
unsigned int val;
......@@ -143,7 +155,7 @@ __sn_readl (void *addr)
}
static inline unsigned long
__sn_readq (void *addr)
___sn_readq (void *addr)
{
unsigned long val;
......
......@@ -89,7 +89,7 @@
#ifndef CONFIG_SMP
#define cpu_logical_id(cpu) 0
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff)
#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#endif
/*
......@@ -98,8 +98,8 @@
*/
#define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff)
#define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3)
#define get_nasid() ((ia64_get_lid() >> 16) & 0xfff)
#define get_slice() ((ia64_get_lid() >> 28) & 0xf)
#define get_nasid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff)
#define get_slice() ((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf)
#define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff)
/*
......
......@@ -9,11 +9,13 @@
* This file is used for SMP configurations only.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
#include <asm/system.h>
typedef struct {
volatile unsigned int lock;
......@@ -102,8 +104,8 @@ typedef struct {
do { \
rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
while (*(volatile int *)__read_lock_ptr < 0) \
cpu_relax(); \
} \
......@@ -112,7 +114,7 @@ do { \
#define _raw_read_unlock(rw) \
do { \
rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#define _raw_write_lock(rw) \
......
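_raw_read_lock() above is optimistic: it fetchadds +1 with acquire semantics, and if the old count was negative (a writer holds the lock) it undoes the increment with release semantics and spins until the count turns non-negative. A hedged user-space sketch of that algorithm:

static void toy_read_lock(volatile int *count)
{
	while (__atomic_fetch_add(count, 1, __ATOMIC_ACQUIRE) < 0) {
		__atomic_fetch_add(count, -1, __ATOMIC_RELEASE);	/* back off */
		while (*count < 0)
			;	/* cpu_relax() in the kernel */
	}
}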
......@@ -55,12 +55,6 @@ extern struct ia64_boot_param {
__u64 initrd_size;
} *ia64_boot_param;
static inline void
ia64_insn_group_barrier (void)
{
__asm__ __volatile__ (";;" ::: "memory");
}
/*
* Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all
......@@ -83,7 +77,7 @@ ia64_insn_group_barrier (void)
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
#define mb() __asm__ __volatile__ ("mf" ::: "memory")
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
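mb()/rmb()/wmb() exist for the classic producer/consumer pairing: the writer orders its data store before the flag store, the reader orders the flag load before the data load. A hedged sketch with C11-style fences standing in for the mf-based macros:

static int payload;
static volatile int ready;

static void producer(void)
{
	payload = 42;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* plays the role of wmb() */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* plays the role of rmb() */
	return payload;
}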
......@@ -119,22 +113,26 @@ ia64_insn_group_barrier (void)
/* clearing psr.i is implicitly serialized (visible by next insn) */
/* setting psr.i requires data serialization */
#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \
"rsm psr.i;;" \
: "=r" (x) :: "memory")
#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"(p6) srlz.d" \
:: "r" ((x) & IA64_PSR_I) \
: "p6", "p7", "memory")
#define __local_irq_save(x) \
do { \
(x) = ia64_getreg(_IA64_REG_PSR); \
ia64_stop(); \
ia64_rsm(IA64_PSR_I); \
} while (0)
#define __local_irq_disable() \
do { \
ia64_stop(); \
ia64_rsm(IA64_PSR_I); \
} while (0)
#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
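Usage of the rewritten macros is unchanged: flags receives the whole psr value from ia64_getreg(), and the restore path re-enables interrupts only if psr.i was set on entry. A sketch of the canonical pattern (kernel context assumed, so this fragment is not standalone):

static void protected_increment(int *counter)
{
	unsigned long flags;

	__local_irq_save(flags);	/* read psr, stop the group, clear psr.i */
	(*counter)++;			/* safe from this CPU's interrupts */
	__local_irq_restore(flags);	/* psr.i comes back only if it was set */
}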
#ifdef CONFIG_IA64_DEBUG_IRQ
extern unsigned long last_cli_ip;
# define __save_ip() __asm__ ("mov %0=ip" : "=r" (last_cli_ip))
# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
# define local_irq_save(x) \
do { \
......@@ -164,14 +162,14 @@ do { \
# define local_irq_restore(x) __local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */
#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
#define local_save_flags(flags) ((flags) = ia64_getreg(_IA64_REG_PSR))
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
(flags & IA64_PSR_I) == 0; \
unsigned long __ia64_id_flags; \
local_save_flags(__ia64_id_flags); \
(__ia64_id_flags & IA64_PSR_I) == 0; \
})
#ifdef __KERNEL__
......
......@@ -10,6 +10,7 @@
* Also removed cacheflush_time as it's entirely unused.
*/
#include <asm/intrinsics.h>
#include <asm/processor.h>
typedef unsigned long cycles_t;
......@@ -32,7 +33,7 @@ get_cycles (void)
{
cycles_t ret;
__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret));
ret = ia64_getreg(_IA64_REG_AR_ITC);
return ret;
}
......
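get_cycles() reads the free-running ar.itc, so only differences between two reads are meaningful. A sketch of the usual consumption pattern (assumes this header's context; time_region is an invented name):

static cycles_t time_region(void (*fn)(void))
{
	cycles_t start = get_cycles();

	fn();
	return get_cycles() - start;	/* elapsed cycles; wraps modulo 2^64 */
}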
......@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
......@@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
if (vma->vm_mm == current->active_mm)
asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
ia64_ptcl(addr, (PAGE_SHIFT << 2));
else
vma->vm_mm->context = 0;
#endif
......
......@@ -334,73 +334,20 @@ waitpid (int pid, int * wait_stat, int flags)
}
static inline int
execve (const char *filename, char *const av[], char *const ep[])
{
register long r8 asm("r8");
register long r10 asm("r10");
register long r15 asm("r15") = __NR_execve;
register long out0 asm("out0") = (long)filename;
register long out1 asm("out1") = (long)av;
register long out2 asm("out2") = (long)ep;
asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
: "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1), "=r" (out2)
: "2" (r15), "3" (out0), "4" (out1), "5" (out2)
: "memory", "out3", "out4", "out5", "out6", "out7",
/* Non-stacked integer registers, minus r8, r10, r15, r13 */
"r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
"r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
"r28", "r29", "r30", "r31",
/* Predicate registers. */
"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
/* Non-rotating fp registers. */
"f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
/* Branch registers. */
"b6", "b7" );
return r8;
}
static inline pid_t
clone (unsigned long flags, void *sp)
{
register long r8 asm("r8");
register long r10 asm("r10");
register long r15 asm("r15") = __NR_clone;
register long out0 asm("out0") = (long)flags;
register long out1 asm("out1") = (long)sp;
long retval;
/* clone clobbers current, hence the "r13" in the clobbers list */
asm volatile ( "break " __stringify(__BREAK_SYSCALL) ";;\n\t"
: "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1)
: "2" (r15), "3" (out0), "4" (out1)
: "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13",
/* Non-stacked integer registers, minus r8, r10, r15, r13 */
"r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
"r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
"r28", "r29", "r30", "r31",
/* Predicate registers. */
"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
/* Non-rotating fp registers. */
"f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
/* Branch registers. */
"b6", "b7" );
retval = r8;
return retval;;
}
extern int execve (const char *filename, char *const av[], char *const ep[]);
extern pid_t clone (unsigned long flags, void *sp);
#endif /* __KERNEL_SYSCALLS__ */
/*
* "Conditional" syscalls
*
* What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on
* all toolchains, so we just do it by hand. Note, this macro can only be used in the
* file which defines sys_ni_syscall, i.e., in kernel/sys.c.
* Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
* kernel/sys.c. This version causes warnings because the declaration isn't a
* proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
* declarations have prototypes at the moment.
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall")));
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
......
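The new cond_syscall() above is the direct weak-alias attribute form that the old comment dismissed as non-portable; the hand-rolled .weak/.set asm is gone. The idiom in isolation (GCC/ELF assumed, toy names, asmlinkage dropped for user space):

long sys_ni_syscall(void)
{
	return -38;	/* -ENOSYS on Linux */
}

/* resolves to sys_ni_syscall unless a strong sys_foo is defined elsewhere */
long sys_foo(void) __attribute__((weak, alias("sys_ni_syscall")));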