Commit 113265f0 authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into home.osdl.org:/home/torvalds/v2.5/linux
parents 7c2b7264 050716fa
@@ -66,7 +66,7 @@ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
 drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
-boot := arch/ia64/boot
+boot := arch/ia64/hp/sim/boot
 .PHONY: boot compressed check
@@ -7,7 +7,7 @@
 # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
 #
-obj-y := hpsim_irq.o hpsim_setup.o
+obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
 obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
 obj-$(CONFIG_HP_SIMETH) += simeth.o
@@ -5,7 +5,7 @@
 # License. See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1998 by David Mosberger-Tang <davidm@hpl.hp.com>
+# Copyright (C) 1998, 2003 by David Mosberger-Tang <davidm@hpl.hp.com>
 #
 targets-$(CONFIG_IA64_HP_SIM) += bootloader
@@ -32,6 +32,6 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 LDFLAGS_bootloader = -static -T
-$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o \
+$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
 lib/lib.a arch/ia64/lib/lib.a FORCE
 $(call if_changed,ld)

/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
.bss
.align 16
stack_mem:
.skip 16384
.text
/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
GLOBAL_ENTRY(printk)
break 0
END(printk)
GLOBAL_ENTRY(_start)
.prologue
.save rp, r0
.body
movl gp = __gp
movl sp = stack_mem
bsw.1
br.call.sptk.many rp=start_bootloader
END(_start)
GLOBAL_ENTRY(ssc)
.regstk 5,0,0,0
mov r15=in4
break 0x80001
br.ret.sptk.many b0
END(ssc)
GLOBAL_ENTRY(jmp_to_kernel)
.regstk 2,0,0,0
mov r28=in0
mov b7=in1
br.sptk.few b7
END(jmp_to_kernel)
GLOBAL_ENTRY(pal_emulator_static)
mov r8=-1
mov r9=256
;;
cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
(p6) br.cond.sptk.few static
;;
mov r9=512
;;
cmp.gtu p6,p7=r9,r28
(p6) br.cond.sptk.few stacked
;;
static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
(p7) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
movl r9=0x100000000 /* tc.base */
movl r10=0x0000000200000003 /* count[0], count[1] */
movl r11=0x1000000000002000 /* stride[0], stride[1] */
br.cond.sptk.few rp
1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x100000064 /* proc_ratio (1/100) */
movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
movl r11=0x100000064 /* itc_ratio<<32 (1/100) */
;;
1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
mov r9=96 /* num phys stacked */
mov r10=0 /* hints */
mov r11=0
br.cond.sptk.few rp
1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */
(p7) br.cond.sptk.few 1f
mov r9=ar.lc
movl r8=524288 /* flush 512k cache lines (16MB) */
;;
mov ar.lc=r8
movl r8=0xe000000000000000
;;
.loop: fc r8
add r8=32,r8
br.cloop.sptk.few .loop
sync.i
;;
srlz.i
;;
mov ar.lc=r9
mov r8=r0
;;
1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */
mov r10=0 /* reserved */
mov r11=0 /* reserved */
mov r16=0xffff /* implemented PMC */
mov r17=0xffff /* implemented PMD */
add r18=8,r29 /* second index */
;;
st8 [r29]=r16,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store implemented PMD */
st8 [r18]=r0,16 /* clear remaining bits */
mov r16=0xf0 /* cycles count capable PMC */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
mov r17=0x10 /* retired bundles capable PMC */
;;
st8 [r29]=r16,16 /* store cycles capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store retired bundle capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
1: br.cond.sptk.few rp
stacked:
br.ret.sptk.few rp
END(pal_emulator_static)
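
The entry points above are reached from C. A sketch of the corresponding declarations (the jmp_to_kernel() and sys_fw_init() prototypes appear verbatim in bootloader.c below; the static PAL convention, index in r28 and results in r8-r11, is fixed by the PAL calling convention rather than by a C signature):

    extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);
    extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
    extern void pal_emulator_static (void);	/* static PAL calls: index in r28 */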
 /*
- * arch/ia64/boot/bootloader.c
+ * arch/ia64/hp/sim/boot/bootloader.c
  *
  * Loads an ELF kernel.
  *
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
  * David Mosberger-Tang <davidm@hpl.hp.com>
  * Stephane Eranian <eranian@hpl.hp.com>
  *
@@ -17,31 +17,13 @@ struct task_struct; /* forward declaration for elf.h */
 #include <linux/kernel.h>
 #include <asm/elf.h>
+#include <asm/intrinsics.h>
 #include <asm/pal.h>
 #include <asm/pgtable.h>
 #include <asm/sal.h>
 #include <asm/system.h>
-/* Simulator system calls: */
-#define SSC_CONSOLE_INIT 20
-#define SSC_GETCHAR 21
-#define SSC_PUTCHAR 31
-#define SSC_OPEN 50
-#define SSC_CLOSE 51
-#define SSC_READ 52
-#define SSC_WRITE 53
-#define SSC_GET_COMPLETION 54
-#define SSC_WAIT_COMPLETION 55
-#define SSC_CONNECT_INTERRUPT 58
-#define SSC_GENERATE_INTERRUPT 59
-#define SSC_SET_PERIODIC_INTERRUPT 60
-#define SSC_GET_RTC 65
-#define SSC_EXIT 66
-#define SSC_LOAD_SYMBOLS 69
-#define SSC_GET_TOD 74
-#define SSC_GET_ARGS 75
+#include "ssc.h"
 struct disk_req {
 unsigned long addr;
@@ -53,10 +35,8 @@ struct disk_stat {
 unsigned count;
 };
-#include "../kernel/fw-emu.c"
-/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
-asm (".global printk; printk = 0");
+extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
+extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen);
 /*
  * Set a break point on this function so that symbols are available to set breakpoints in
@@ -82,9 +62,8 @@ cons_write (const char *buf)
 #define MAX_ARGS 32
 void
-_start (void)
+start_bootloader (void)
 {
-static char stack[16384] __attribute__ ((aligned (16)));
 static char mem[4096];
 static char buffer[1024];
 unsigned long off;
@@ -98,10 +77,6 @@ _start (void)
 char *kpath, *args;
 long arglen = 0;
-asm volatile ("movl gp=__gp;;" ::: "memory");
-asm volatile ("mov sp=%0" :: "r"(stack) : "memory");
-asm volatile ("bsw.1;;");
 ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
 /*
@@ -195,15 +170,14 @@ _start (void)
 cons_write("starting kernel...\n");
 /* fake an I/O base address: */
-asm volatile ("mov ar.k0=%0" :: "r"(0xffffc000000UL));
+ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL);
 bp = sys_fw_init(args, arglen);
 ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS);
 debug_break();
-asm volatile ("mov sp=%2; mov r28=%1; br.sptk.few %0"
-:: "b"(e_entry), "r"(bp), "r"(__pa(&stack)));
+jmp_to_kernel((unsigned long) bp, e_entry);
 cons_write("kernel returned!\n");
 ssc(-1, 0, 0, 0, SSC_EXIT);
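
For context, console output in this bootloader also goes through ssc(); a minimal sketch of what a cons_write() built on SSC_PUTCHAR looks like (assumed shape; the real function lives in an elided part of bootloader.c):

    static void
    cons_write (const char *buf)
    {
        unsigned long ch;

        /* one simulator call per character */
        while ((ch = *buf++) != '\0')
            ssc(ch, 0, 0, 0, SSC_PUTCHAR);
    }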
@@ -3,9 +3,6 @@
  *
  * Copyright (C) 1998-2001 Hewlett-Packard Co
  * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * For the HP simulator, this file gets include in boot/bootloader.c.
- * For SoftSDV, this file gets included in sys_softsdv.c.
  */
 #include <linux/config.h>
@@ -18,6 +15,8 @@
 #include <asm/pal.h>
 #include <asm/sal.h>
+#include "ssc.h"
 #define MB (1024*1024UL)
 #define SIMPLE_MEMMAP 1
@@ -37,27 +36,6 @@ static char fw_mem[( sizeof(struct ia64_boot_param)
 + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
 + 1024)] __attribute__ ((aligned (8)));
-#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
-/* Simulator system calls: */
-#define SSC_EXIT 66
-/*
- * Simulator system call.
- */
-static long
-ssc (long arg0, long arg1, long arg2, long arg3, int nr)
-{
-register long r8 asm ("r8");
-asm volatile ("mov r15=%1\n\t"
-"break 0x80001"
-: "=r"(r8)
-: "r"(nr), "r"(arg0), "r"(arg1), "r"(arg2), "r"(arg3));
-return r8;
-}
 #define SECS_PER_HOUR (60 * 60)
 #define SECS_PER_DAY (SECS_PER_HOUR * 24)
@@ -119,109 +97,8 @@ offtime (unsigned long t, efi_time_t *tp)
 return 1;
 }
-#endif /* CONFIG_IA64_HP_SIM */
-/*
- * Very ugly, but we need this in the simulator only. Once we run on
- * real hw, this can all go away.
- */
 extern void pal_emulator_static (void);
-asm (
-" .proc pal_emulator_static\n"
-"pal_emulator_static:"
-" mov r8=-1\n"
-" mov r9=256\n"
-" ;;\n"
-" cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */\n"
-"(p6) br.cond.sptk.few static\n"
-" ;;\n"
-" mov r9=512\n"
-" ;;\n"
-" cmp.gtu p6,p7=r9,r28\n"
-"(p6) br.cond.sptk.few stacked\n"
-" ;;\n"
-"static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" ;;\n"
-" mov r8=0 /* status = 0 */\n"
-" movl r9=0x100000000 /* tc.base */\n"
-" movl r10=0x0000000200000003 /* count[0], count[1] */\n"
-" movl r11=0x1000000000002000 /* stride[0], stride[1] */\n"
-" br.cond.sptk.few rp\n"
-"1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r8=0 /* status = 0 */\n"
-" movl r9 =0x100000064 /* proc_ratio (1/100) */\n"
-" movl r10=0x100000100 /* bus_ratio<<32 (1/256) */\n"
-" movl r11=0x100000064 /* itc_ratio<<32 (1/100) */\n"
-" ;;\n"
-"1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r8=0 /* status = 0 */\n"
-" mov r9=96 /* num phys stacked */\n"
-" mov r10=0 /* hints */\n"
-" mov r11=0\n"
-" br.cond.sptk.few rp\n"
-"1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r9=ar.lc\n"
-" movl r8=524288 /* flush 512k million cache lines (16MB) */\n"
-" ;;\n"
-" mov ar.lc=r8\n"
-" movl r8=0xe000000000000000\n"
-" ;;\n"
-".loop: fc r8\n"
-" add r8=32,r8\n"
-" br.cloop.sptk.few .loop\n"
-" sync.i\n"
-" ;;\n"
-" srlz.i\n"
-" ;;\n"
-" mov ar.lc=r9\n"
-" mov r8=r0\n"
-" ;;\n"
-"1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r8=0 /* status = 0 */\n"
-" movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */\n"
-" mov r10=0 /* reserved */\n"
-" mov r11=0 /* reserved */\n"
-" mov r16=0xffff /* implemented PMC */\n"
-" mov r17=0xffff /* implemented PMD */\n"
-" add r18=8,r29 /* second index */\n"
-" ;;\n"
-" st8 [r29]=r16,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r17,16 /* store implemented PMD */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" mov r16=0xf0 /* cycles count capable PMC */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" mov r17=0x10 /* retired bundles capable PMC */\n"
-" ;;\n"
-" st8 [r29]=r16,16 /* store cycles capable */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r17,16 /* store retired bundle capable */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-"1: br.cond.sptk.few rp\n"
-"stacked:\n"
-" br.ret.sptk.few rp\n"
-" .endp pal_emulator_static\n");
 /* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
 #define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3)
@@ -268,14 +145,14 @@ efi_unimplemented (void)
 return EFI_UNSUPPORTED;
 }
-static long
+static struct sal_ret_values
 sal_emulator (long index, unsigned long in1, unsigned long in2,
 unsigned long in3, unsigned long in4, unsigned long in5,
 unsigned long in6, unsigned long in7)
 {
-register long r9 asm ("r9") = 0;
-register long r10 asm ("r10") = 0;
-register long r11 asm ("r11") = 0;
+long r9 = 0;
+long r10 = 0;
+long r11 = 0;
 long status;
 /*
@@ -357,8 +234,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
 } else {
 status = -1;
 }
-asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
-return status;
+return ((struct sal_ret_values) {status, r9, r10, r11});
 }
@@ -427,7 +303,7 @@ sys_fw_init (const char *args, int arglen)
 efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
 efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");
 efi_systab->fw_revision = 1;
-efi_systab->runtime = __pa(efi_runtime);
+efi_systab->runtime = (void *) __pa(efi_runtime);
 efi_systab->nr_tables = 1;
 efi_systab->tables = __pa(efi_tables);
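
The sal_emulator() rewrite works because ia64 returns small aggregates in registers r8-r11, which is exactly where SAL callers expect the status word and the three result values; the old code faked this with register-pinned locals and an empty asm. A sketch of the aggregate, assumed to match the definition in <asm/sal.h>:

    struct sal_ret_values {
        long r8;	/* status */
        long r9;
        long r10;
        long r11;
    };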
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*/
#ifndef ssc_h
#define ssc_h
/* Simulator system calls: */
#define SSC_CONSOLE_INIT 20
#define SSC_GETCHAR 21
#define SSC_PUTCHAR 31
#define SSC_OPEN 50
#define SSC_CLOSE 51
#define SSC_READ 52
#define SSC_WRITE 53
#define SSC_GET_COMPLETION 54
#define SSC_WAIT_COMPLETION 55
#define SSC_CONNECT_INTERRUPT 58
#define SSC_GENERATE_INTERRUPT 59
#define SSC_SET_PERIODIC_INTERRUPT 60
#define SSC_GET_RTC 65
#define SSC_EXIT 66
#define SSC_LOAD_SYMBOLS 69
#define SSC_GET_TOD 74
#define SSC_GET_ARGS 75
/*
* Simulator system call.
*/
extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);
#endif /* ssc_h */
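
As an illustration of the request codes above, a hedged sketch of a simulator disk read in the style of bootloader.c (hypothetical helper; the exact disk_req/disk_stat layouts and the completion handshake are assumptions based on the fragments visible in this patch):

    #include "ssc.h"

    static long
    sim_read (long fd, void *buf, long len, long disk_off)
    {
        struct disk_req req;
        struct disk_stat stat;

        req.addr = (unsigned long) buf;
        req.len = len;
        ssc(fd, 1, (long) &req, disk_off, SSC_READ);
        ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);
        return stat.count;	/* bytes transferred */
    }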
#include <asm/asmmacro.h>
/*
* Simulator system call.
*/
GLOBAL_ENTRY(ia64_ssc)
mov r15=r36
break 0x80001
br.ret.sptk.many rp
END(ia64_ssc)
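
ia64_ssc() is the kernel-resident twin of the bootloader's ssc(): the same break 0x80001 trap, with the request number taken from the fifth stacked argument (r36). A typical call, sketched:

    extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);

    ia64_ssc(ch, 0, 0, 0, SSC_PUTCHAR);	/* one character to the simulator console */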
@@ -25,19 +25,6 @@
 #include "hpsim_ssc.h"
-/*
- * Simulator system call.
- */
-asm (".text\n"
-".align 32\n"
-".global ia64_ssc\n"
-".proc ia64_ssc\n"
-"ia64_ssc:\n"
-"mov r15=r36\n"
-"break 0x80001\n"
-"br.ret.sptk.many rp\n"
-".endp\n");
 void
 ia64_ssc_connect_irq (long intr, long irq)
 {
@@ -24,6 +24,7 @@
 #include <linux/wait.h>
 #include <linux/compat.h>
+#include <asm/intrinsics.h>
 #include <asm/uaccess.h>
 #include <asm/rse.h>
 #include <asm/sigcontext.h>
@@ -41,6 +42,11 @@
 #define __IA32_NR_sigreturn 119
 #define __IA32_NR_rt_sigreturn 173
+#ifdef ASM_SUPPORTED
+/*
+ * Don't let GCC use f16-f31 so that save_ia32_fpstate_live() and
+ * restore_ia32_fpstate_live() can be sure the live registers contain user-level state.
+ */
 register double f16 asm ("f16"); register double f17 asm ("f17");
 register double f18 asm ("f18"); register double f19 asm ("f19");
 register double f20 asm ("f20"); register double f21 asm ("f21");
@@ -50,6 +56,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
 register double f26 asm ("f26"); register double f27 asm ("f27");
 register double f28 asm ("f28"); register double f29 asm ("f29");
 register double f30 asm ("f30"); register double f31 asm ("f31");
+#endif
 struct sigframe_ia32
 {
@@ -198,30 +205,6 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
 * All other fields unused...
 */
-#define __ldfe(regnum, x) \
-({ \
-register double __f__ asm ("f"#regnum); \
-__asm__ __volatile__ ("ldfe %0=[%1] ;;" :"=f"(__f__): "r"(x)); \
-})
-#define __ldf8(regnum, x) \
-({ \
-register double __f__ asm ("f"#regnum); \
-__asm__ __volatile__ ("ldf8 %0=[%1] ;;" :"=f"(__f__): "r"(x)); \
-})
-#define __stfe(x, regnum) \
-({ \
-register double __f__ asm ("f"#regnum); \
-__asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-#define __stf8(x, regnum) \
-({ \
-register double __f__ asm ("f"#regnum); \
-__asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
 static int
 save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 {
@@ -238,18 +221,19 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
 return -EFAULT;
-/* Readin fsr, fcr, fir, fdr and copy onto fpstate */
-asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
-asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
-asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
-asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+/* Read in fsr, fcr, fir, fdr and copy onto fpstate */
+fsr = ia64_getreg(_IA64_REG_AR_FSR);
+fcr = ia64_getreg(_IA64_REG_AR_FCR);
+fir = ia64_getreg(_IA64_REG_AR_FIR);
+fdr = ia64_getreg(_IA64_REG_AR_FDR);
 /*
 * We need to clear the exception state before calling the signal handler. Clear
 * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex
 * instruction.
 */
 new_fsr = fsr & ~0x80ff;
-asm volatile ( "mov ar.fsr=%0;" :: "r"(new_fsr));
+ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
 __put_user(fcr & 0xffff, &save->cw);
 __put_user(fsr & 0xffff, &save->sw);
@@ -286,45 +270,45 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
 ia64f2ia32f(fpregp, &ptp->f11);
 copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-__stfe(fpregp, 12);
+ia64_stfe(fpregp, 12);
 copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-__stfe(fpregp, 13);
+ia64_stfe(fpregp, 13);
 copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-__stfe(fpregp, 14);
+ia64_stfe(fpregp, 14);
 copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-__stfe(fpregp, 15);
+ia64_stfe(fpregp, 15);
 copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-__stf8(&num128[0], 16);
-__stf8(&num128[1], 17);
+ia64_stf8(&num128[0], 16);
+ia64_stf8(&num128[1], 17);
 copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 18);
-__stf8(&num128[1], 19);
+ia64_stf8(&num128[0], 18);
+ia64_stf8(&num128[1], 19);
 copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 20);
-__stf8(&num128[1], 21);
+ia64_stf8(&num128[0], 20);
+ia64_stf8(&num128[1], 21);
 copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 22);
-__stf8(&num128[1], 23);
+ia64_stf8(&num128[0], 22);
+ia64_stf8(&num128[1], 23);
 copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 24);
-__stf8(&num128[1], 25);
+ia64_stf8(&num128[0], 24);
+ia64_stf8(&num128[1], 25);
 copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 26);
-__stf8(&num128[1], 27);
+ia64_stf8(&num128[0], 26);
+ia64_stf8(&num128[1], 27);
 copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 28);
-__stf8(&num128[1], 29);
+ia64_stf8(&num128[0], 28);
+ia64_stf8(&num128[1], 29);
 copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
-__stf8(&num128[0], 30);
-__stf8(&num128[1], 31);
+ia64_stf8(&num128[0], 30);
+ia64_stf8(&num128[1], 31);
 copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
 return 0;
 }
@@ -354,10 +338,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 * should remain same while writing.
 * So, we do a read, change specific fields and write.
 */
-asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
-asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
-asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
-asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+fsr = ia64_getreg(_IA64_REG_AR_FSR);
+fcr = ia64_getreg(_IA64_REG_AR_FCR);
+fir = ia64_getreg(_IA64_REG_AR_FIR);
+fdr = ia64_getreg(_IA64_REG_AR_FDR);
 __get_user(mxcsr, (unsigned int *)&save->mxcsr);
 /* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */
@@ -391,10 +375,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 num64 = (num64 << 32) | lo;
 fdr = (fdr & (~0xffffffffffff)) | num64;
-asm volatile ( "mov ar.fsr=%0;" :: "r"(fsr));
-asm volatile ( "mov ar.fcr=%0;" :: "r"(fcr));
-asm volatile ( "mov ar.fir=%0;" :: "r"(fir));
-asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
+ia64_setreg(_IA64_REG_AR_FSR, fsr);
+ia64_setreg(_IA64_REG_AR_FCR, fcr);
+ia64_setreg(_IA64_REG_AR_FIR, fir);
+ia64_setreg(_IA64_REG_AR_FDR, fdr);
 /*
 * restore f8..f11 onto pt_regs
@@ -420,45 +404,45 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 ia32f2ia64f(&ptp->f11, fpregp);
 copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-__ldfe(12, fpregp);
+ia64_ldfe(12, fpregp);
 copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-__ldfe(13, fpregp);
+ia64_ldfe(13, fpregp);
 copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-__ldfe(14, fpregp);
+ia64_ldfe(14, fpregp);
 copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
-__ldfe(15, fpregp);
+ia64_ldfe(15, fpregp);
 copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
-__ldf8(16, &num128[0]);
-__ldf8(17, &num128[1]);
+ia64_ldf8(16, &num128[0]);
+ia64_ldf8(17, &num128[1]);
 copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
-__ldf8(18, &num128[0]);
-__ldf8(19, &num128[1]);
+ia64_ldf8(18, &num128[0]);
+ia64_ldf8(19, &num128[1]);
 copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
-__ldf8(20, &num128[0]);
-__ldf8(21, &num128[1]);
+ia64_ldf8(20, &num128[0]);
+ia64_ldf8(21, &num128[1]);
 copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
-__ldf8(22, &num128[0]);
-__ldf8(23, &num128[1]);
+ia64_ldf8(22, &num128[0]);
+ia64_ldf8(23, &num128[1]);
 copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
-__ldf8(24, &num128[0]);
-__ldf8(25, &num128[1]);
+ia64_ldf8(24, &num128[0]);
+ia64_ldf8(25, &num128[1]);
 copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
-__ldf8(26, &num128[0]);
-__ldf8(27, &num128[1]);
+ia64_ldf8(26, &num128[0]);
+ia64_ldf8(27, &num128[1]);
 copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
-__ldf8(28, &num128[0]);
-__ldf8(29, &num128[1]);
+ia64_ldf8(28, &num128[0]);
+ia64_ldf8(29, &num128[1]);
 copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
-__ldf8(30, &num128[0]);
-__ldf8(31, &num128[1]);
+ia64_ldf8(30, &num128[0]);
+ia64_ldf8(31, &num128[1]);
 return 0;
 }
@@ -705,7 +689,7 @@ setup_sigcontext_ia32 (struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate
 /*
 * `eflags' is in an ar register for this context
 */
-asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+flag = ia64_getreg(_IA64_REG_AR_EFLAG);
 err |= __put_user((unsigned int)flag, &sc->eflags);
 err |= __put_user(regs->r12, &sc->esp_at_signal);
 err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);
@@ -790,10 +774,10 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
 * IA32 process's context.
 */
 err |= __get_user(tmpflags, &sc->eflags);
-asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+flag = ia64_getreg(_IA64_REG_AR_EFLAG);
 flag &= ~0x40DD5;
 flag |= (tmpflags & 0x40DD5);
-asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
+ia64_setreg(_IA64_REG_AR_EFLAG, flag);
 regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
 }
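
Under GCC, the ia64_ldfe()/ia64_stfe()/ia64_ldf8()/ia64_stf8() intrinsics used above are essentially the deleted __ldfe()/__stfe() macros moved into a shared header (and given an Intel-compiler implementation as well). A hedged sketch of the store flavor, mirroring the removed macro (hypothetical name; regnum must be a compile-time constant so it can be pasted into the register name):

    #define my_stfe(x, regnum)						\
    ({									\
        register double __f__ asm ("f"#regnum);				\
        asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
    })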
@@ -18,6 +18,7 @@
 #include <linux/personality.h>
 #include <linux/sched.h>
+#include <asm/intrinsics.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -68,19 +69,11 @@ ia32_load_segment_descriptors (struct task_struct *task)
 void
 ia32_save_state (struct task_struct *t)
 {
-unsigned long eflag, fsr, fcr, fir, fdr;
-asm ("mov %0=ar.eflag;"
-"mov %1=ar.fsr;"
-"mov %2=ar.fcr;"
-"mov %3=ar.fir;"
-"mov %4=ar.fdr;"
-: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
-t->thread.eflag = eflag;
-t->thread.fsr = fsr;
-t->thread.fcr = fcr;
-t->thread.fir = fir;
-t->thread.fdr = fdr;
+t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
+t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
+t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
+t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
+t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
 ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
 ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
 }
@@ -99,12 +92,11 @@ ia32_load_state (struct task_struct *t)
 fdr = t->thread.fdr;
 tssd = load_desc(_TSS(nr)); /* TSSD */
-asm volatile ("mov ar.eflag=%0;"
-"mov ar.fsr=%1;"
-"mov ar.fcr=%2;"
-"mov ar.fir=%3;"
-"mov ar.fdr=%4;"
-:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
+ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
+ia64_setreg(_IA64_REG_AR_FSR, fsr);
+ia64_setreg(_IA64_REG_AR_FCR, fcr);
+ia64_setreg(_IA64_REG_AR_FIR, fir);
+ia64_setreg(_IA64_REG_AR_FDR, fdr);
 current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
 current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
 ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
@@ -178,7 +170,7 @@ void
 ia32_cpu_init (void)
 {
 /* initialize global ia32 state - CR0 and CR4 */
-asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
 }
 static int __init
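
ia64_getreg()/ia64_setreg() select the register at compile time from the _IA64_REG_* index; with GCC each call expands to a single mov, just like the asm being deleted here. A hedged sketch for one application register (hypothetical macro; the real dispatch lives in the intrinsics headers):

    #define my_get_ar_fsr()					\
    ({								\
        unsigned long __v;					\
        asm volatile ("mov %0=ar.fsr" : "=r"(__v));		\
        __v;							\
    })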
@@ -14,6 +14,7 @@
 #include "ia32priv.h"
+#include <asm/intrinsics.h>
 #include <asm/ptrace.h>
 int
@@ -93,9 +94,8 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
 {
 unsigned long fsr, fcr;
-asm ("mov %0=ar.fsr;"
-"mov %1=ar.fcr;"
-: "=r"(fsr), "=r"(fcr));
+fsr = ia64_getreg(_IA64_REG_AR_FSR);
+fcr = ia64_getreg(_IA64_REG_AR_FCR);
 siginfo.si_signo = SIGFPE;
 /*
@@ -446,16 +446,18 @@ extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long,
 extern void ia32_load_segment_descriptors (struct task_struct *task);
 #define ia32f2ia64f(dst,src) \
 do { \
-register double f6 asm ("f6"); \
-asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
+ia64_ldfe(6,src); \
+ia64_stop(); \
+ia64_stf_spill(dst, 6); \
 } while(0)
 #define ia64f2ia32f(dst,src) \
 do { \
-register double f6 asm ("f6"); \
-asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
+ia64_ldf_fill(6, src); \
+ia64_stop(); \
+ia64_stfe(dst, 6); \
 } while(0)
 struct user_regs_struct32 {
 __u32 ebx, ecx, edx, esi, edi, ebp, eax;
@@ -468,11 +470,8 @@ struct user_regs_struct32 {
 };
 /* Prototypes for use in elfcore32.h */
-int save_ia32_fpstate (struct task_struct *tsk,
-struct ia32_user_i387_struct *save);
-int save_ia32_fpxstate (struct task_struct *tsk,
-struct ia32_user_fxsr_struct *save);
+extern int save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save);
+extern int save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save);
 #endif /* !CONFIG_IA32_SUPPORT */
@@ -51,9 +51,10 @@
 #include <linux/compat.h>
 #include <linux/vfs.h>
+#include <asm/intrinsics.h>
+#include <asm/semaphore.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
-#include <asm/semaphore.h>
 #include "ia32priv.h"
@@ -2192,7 +2193,7 @@ sys32_iopl (int level)
 if (level != 3)
 return(-EINVAL);
 /* Trying to gain more privileges? */
-asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
+old = ia64_getreg(_IA64_REG_AR_EFLAG);
 if ((unsigned int) level > ((old >> 12) & 3)) {
 if (!capable(CAP_SYS_RAWIO))
 return -EPERM;
@@ -2216,7 +2217,7 @@ sys32_iopl (int level)
 if (addr >= 0) {
 old = (old & ~0x3000) | (level << 12);
-asm volatile ("mov ar.eflag=%0;;" :: "r"(old));
+ia64_setreg(_IA64_REG_AR_EFLAG, old);
 }
 fput(file);
@@ -471,6 +471,18 @@ GLOBAL_ENTRY(__ia64_syscall)
 br.ret.sptk.many rp
 END(__ia64_syscall)
+GLOBAL_ENTRY(execve)
+mov r15=__NR_execve // put syscall number in place
+break __BREAK_SYSCALL
+br.ret.sptk.many rp
+END(execve)
+GLOBAL_ENTRY(clone)
+mov r15=__NR_clone // put syscall number in place
+break __BREAK_SYSCALL
+br.ret.sptk.many rp
+END(clone)
 /*
  * We invoke syscall_trace through this intermediate function to
  * ensure that the syscall input arguments are not clobbered. We
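
These stubs give kernel code proper C-callable execve()/clone() entry points instead of open-coded break instructions. The C-side view is just a pair of prototypes (a sketch; the argument lists are assumed from the usual syscall signatures, not spelled out in this patch):

    extern int execve (const char *filename, char *const argv[], char *const envp[]);
    extern int clone (unsigned long flags, unsigned long sp);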
@@ -39,4 +39,4 @@ static union {
 .thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
 }};
-asm (".global init_task; init_task = init_task_mem");
+extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
@@ -495,7 +495,7 @@ iosapic_register_intr (unsigned int gsi,
 unsigned long polarity, unsigned long trigger)
 {
 int vector;
-unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 vector = gsi_to_vector(gsi);
 if (vector < 0)
@@ -572,7 +572,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 unsigned long trigger)
 {
 int vector;
-unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 vector = isa_irq_to_vector(isa_irq);
@@ -666,11 +666,11 @@ iosapic_enable_intr (unsigned int vector)
 * Direct the interrupt vector to the current cpu, platform redirection
 * will distribute them.
 */
-dest = (ia64_get_lid() >> 16) & 0xffff;
+dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 }
 #else
 /* direct the interrupt vector to the running cpu id */
-dest = (ia64_get_lid() >> 16) & 0xffff;
+dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 #endif
 set_rte(vector, dest);
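
The recurring expression extracts the physical CPU id from cr.lid: the EID byte sits in bits 16-23 and the ID byte in bits 24-31, so bits 16-31 together name the interrupt destination. The same test as a helper (hypothetical, not part of the patch):

    static inline unsigned int
    lid_to_dest (unsigned long lid)
    {
        return (lid >> 16) & 0xffff;	/* EID in bits 16-23, ID in bits 24-31 */
    }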
@@ -30,6 +30,7 @@
 #include <asm/bitops.h>
 #include <asm/delay.h>
+#include <asm/intrinsics.h>
 #include <asm/io.h>
 #include <asm/hw_irq.h>
 #include <asm/machvec.h>
@@ -93,8 +94,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 * because the register and the memory stack are not
 * switched atomically.
 */
-asm ("mov %0=ar.bsp" : "=r"(bsp));
-asm ("mov %0=sp" : "=r"(sp));
+bsp = ia64_getreg(_IA64_REG_AR_BSP);
+sp = ia64_getreg(_IA64_REG_SP);
 if ((sp - bsp) < 1024) {
 static unsigned char count;
@@ -117,11 +118,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 * 16 (without this, it would be ~240, which could easily lead
 * to kernel stack overflows).
 */
-saved_tpr = ia64_get_tpr();
+saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 ia64_srlz_d();
 while (vector != IA64_SPURIOUS_INT_VECTOR) {
 if (!IS_RESCHEDULE(vector)) {
-ia64_set_tpr(vector);
+ia64_setreg(_IA64_REG_CR_TPR, vector);
 ia64_srlz_d();
 do_IRQ(local_vector_to_irq(vector), regs);
@@ -130,7 +131,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 * Disable interrupts and send EOI:
 */
 local_irq_disable();
-ia64_set_tpr(saved_tpr);
+ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
 }
 ia64_eoi();
 vector = ia64_get_ivr();
@@ -193,7 +194,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
 #ifdef CONFIG_SMP
 phys_cpu_id = cpu_physical_id(cpu);
 #else
-phys_cpu_id = (ia64_get_lid() >> 16) & 0xffff;
+phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
 #endif
 /*
@@ -505,14 +505,14 @@ ia64_mca_cmc_vector_setup (void)
 cmcv.cmcv_regval = 0;
 cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 cmcv.cmcv_vector = IA64_CMC_VECTOR;
-ia64_set_cmcv(cmcv.cmcv_regval);
+ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected "
 "machine check vector %#x setup and enabled.\n",
 smp_processor_id(), IA64_CMC_VECTOR);
 IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n",
-smp_processor_id(), ia64_get_cmcv());
+smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }
 /*
@@ -532,10 +532,10 @@ ia64_mca_cmc_vector_disable (void *dummy)
 {
 cmcv_reg_t cmcv;
-cmcv = (cmcv_reg_t)ia64_get_cmcv();
+cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
 cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
-ia64_set_cmcv(cmcv.cmcv_regval);
+ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
 "machine check vector %#x disabled.\n",
@@ -559,10 +559,10 @@ ia64_mca_cmc_vector_enable (void *dummy)
 {
 cmcv_reg_t cmcv;
-cmcv = (cmcv_reg_t)ia64_get_cmcv();
+cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
 cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
-ia64_set_cmcv(cmcv.cmcv_regval);
+ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
 "machine check vector %#x enabled.\n",
@@ -727,10 +727,10 @@ ia64_mca_init(void)
 /* Register the os init handler with SAL */
 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
 ia64_mc_info.imi_monarch_init_handler,
-ia64_tpa(ia64_get_gp()),
+ia64_tpa(ia64_getreg(_IA64_REG_GP)),
 ia64_mc_info.imi_monarch_init_handler_size,
 ia64_mc_info.imi_slave_init_handler,
-ia64_tpa(ia64_get_gp()),
+ia64_tpa(ia64_getreg(_IA64_REG_GP)),
 ia64_mc_info.imi_slave_init_handler_size)))
 {
 printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
@@ -816,16 +816,16 @@ ia64_mca_wakeup_ipi_wait(void)
 do {
 switch(irr_num) {
 case 0:
-irr = ia64_get_irr0();
+irr = ia64_getreg(_IA64_REG_CR_IRR0);
 break;
 case 1:
-irr = ia64_get_irr1();
+irr = ia64_getreg(_IA64_REG_CR_IRR1);
 break;
 case 2:
-irr = ia64_get_irr2();
+irr = ia64_getreg(_IA64_REG_CR_IRR2);
 break;
 case 3:
-irr = ia64_get_irr3();
+irr = ia64_getreg(_IA64_REG_CR_IRR3);
 break;
 }
 } while (!(irr & (1 << irr_bit))) ;
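
The four IRR control registers form a 256-bit pending-interrupt bitmap, 64 vectors apiece; the switch picks register vector/64 and the loop then tests bit vector%64. The same lookup as a compact helper (hypothetical, for illustration only):

    static inline int
    irr_pending (int vector)
    {
        unsigned long irr = 0;

        switch (vector >> 6) {	/* which 64-bit IRR word */
        case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
        case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
        case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
        case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
        }
        return (irr >> (vector & 63)) & 1;
    }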
@@ -39,6 +39,7 @@
 #include <asm/bitops.h>
 #include <asm/errno.h>
+#include <asm/intrinsics.h>
 #include <asm/page.h>
 #include <asm/perfmon.h>
 #include <asm/processor.h>
@@ -255,6 +256,8 @@ typedef struct {
 /*
  * 64-bit software counter structure
+ *
+ * the next_reset_type is applied to the next call to pfm_reset_regs()
  */
 typedef struct {
 unsigned long val; /* virtual 64bit counter value */
@@ -266,7 +269,7 @@ typedef struct {
 unsigned long seed; /* seed for random-number generator */
 unsigned long mask; /* mask for random-number generator */
 unsigned int flags; /* notify/do not notify */
-unsigned int reserved; /* for future use */
+int next_reset_type;/* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */
 unsigned long eventid; /* overflow event identifier */
 } pfm_counter_t;
@@ -556,7 +559,6 @@ static struct vm_operations_struct pfm_vm_ops={
 close: pfm_vm_close
 };
 #define pfm_wait_task_inactive(t) wait_task_inactive(t)
 #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
 #define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -647,7 +649,6 @@ DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
 /* forward declaration */
 static struct file_operations pfm_file_ops;
@@ -671,39 +672,45 @@ static int pfm_end_notify_user(pfm_context_t *ctx);
 static inline void
 pfm_clear_psr_pp(void)
 {
-__asm__ __volatile__ ("rsm psr.pp;; srlz.i;;"::: "memory");
+ia64_rsm(IA64_PSR_PP);
+ia64_srlz_i();
 }
 static inline void
 pfm_set_psr_pp(void)
 {
-__asm__ __volatile__ ("ssm psr.pp;; srlz.i;;"::: "memory");
+ia64_ssm(IA64_PSR_PP);
+ia64_srlz_i();
 }
 static inline void
 pfm_clear_psr_up(void)
 {
-__asm__ __volatile__ ("rsm psr.up;; srlz.i;;"::: "memory");
+ia64_rsm(IA64_PSR_UP);
+ia64_srlz_i();
 }
 static inline void
 pfm_set_psr_up(void)
 {
-__asm__ __volatile__ ("ssm psr.up;; srlz.i;;"::: "memory");
+ia64_ssm(IA64_PSR_UP);
+ia64_srlz_i();
 }
 static inline unsigned long
 pfm_get_psr(void)
 {
 unsigned long tmp;
-__asm__ __volatile__ ("mov %0=psr;;": "=r"(tmp) :: "memory");
+tmp = ia64_getreg(_IA64_REG_PSR);
+ia64_srlz_i();
 return tmp;
 }
 static inline void
 pfm_set_psr_l(unsigned long val)
 {
-__asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;"::"r"(val): "memory");
+ia64_setreg(_IA64_REG_PSR_L, val);
+ia64_srlz_i();
 }
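
These helpers are used in a save/modify/restore pattern, and every PSR update is followed by srlz.i so the change is architecturally visible before the next dependent instruction. A sketch of a typical caller, with the shape assumed from pfm_restore_monitoring() below:

    unsigned long psr;

    psr = pfm_get_psr();	/* snapshot PSR (serialized) */
    pfm_clear_psr_up();		/* stop user-level (psr.up) monitoring */
    /* ... update PMU state ... */
    pfm_set_psr_l(psr);		/* put the original PSR.l back */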
 static inline void
@@ -970,7 +977,7 @@ pfm_restore_monitoring(struct task_struct *task)
 */
 if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
 /* disable dcr pp */
-ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
 pfm_clear_psr_pp();
 } else {
 pfm_clear_psr_up();
@@ -1017,7 +1024,7 @@ pfm_restore_monitoring(struct task_struct *task)
 */
 if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
 /* enable dcr pp */
-ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
+ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
 ia64_srlz_i();
 }
 pfm_set_psr_l(psr);
@@ -1525,7 +1532,7 @@ pfm_lseek(struct file *file, loff_t offset, int whence)
 }
 static ssize_t
-pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
+pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
 {
 pfm_context_t *ctx;
 pfm_msg_t *msg;
@@ -1621,18 +1628,6 @@ pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
 return ret;
 }
-static ssize_t
-pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
-{
-int oldvar, ret;
-oldvar = pfm_debug_var;
-pfm_debug_var = pfm_sysctl.debug_pfm_read;
-ret = pfm_do_read(filp, buf, size, ppos);
-pfm_debug_var = oldvar;
-return ret;
-}
 static ssize_t
 pfm_write(struct file *file, const char *ubuf,
 size_t size, loff_t *ppos)
@@ -1773,7 +1768,7 @@ pfm_syswide_force_stop(void *info)
 /*
 * Update local PMU
 */
-ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
 ia64_srlz_i();
 /*
 * update local cpuinfo
@@ -2752,20 +2747,18 @@ pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
 DPRINT_ovfl(("ovfl_regs=0x%lx flag=%d\n", ovfl_regs[0], flag));
-if (flag == PFM_PMD_NO_RESET) return;
 /*
 * now restore reset value on sampling overflowed counters
 */
 mask >>= PMU_FIRST_COUNTER;
 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
-if (mask & 0x1) {
+if ((mask & 0x1UL) == 0UL) continue;
 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
-DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
-is_long_reset ? "long" : "short", i, val));
-}
+DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
 }
 /*
@@ -2804,16 +2797,16 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
 */
 mask >>= PMU_FIRST_COUNTER;
 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
-if (mask & 0x1) {
+if ((mask & 0x1UL) == 0UL) continue;
 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
-DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
-is_long_reset ? "long" : "short", i, val));
+DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
 pfm_write_soft_counter(ctx, i, val);
 }
-}
 /*
 * Now take care of resetting the other registers
@@ -2854,7 +2847,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 if (is_loaded) {
 thread = &ctx->ctx_task->thread;
-can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0;
+can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
 /*
 * In system wide and when the context is loaded, access can only happen
 * when the caller is running on the CPU being monitored by the session.
...@@ -3562,51 +3555,49 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3562,51 +3555,49 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
struct task_struct *task; struct task_struct *task;
pfm_buffer_fmt_t *fmt; pfm_buffer_fmt_t *fmt;
pfm_ovfl_ctrl_t rst_ctrl; pfm_ovfl_ctrl_t rst_ctrl;
int is_loaded; int state, is_system;
int ret = 0; int ret = 0;
state = ctx->ctx_state;
fmt = ctx->ctx_buf_fmt; fmt = ctx->ctx_buf_fmt;
is_loaded = CTX_IS_LOADED(ctx); is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
if (is_loaded && CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) goto proceed;
/* switch(state) {
* restarting a terminated context is a nop case PFM_CTX_MASKED:
*/ break;
if (unlikely(CTX_IS_TERMINATED(ctx))) { case PFM_CTX_LOADED:
if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
/* fall through */
case PFM_CTX_UNLOADED:
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
return -EBUSY;
case PFM_CTX_TERMINATED:
DPRINT(("context is terminated, nothing to do\n")); DPRINT(("context is terminated, nothing to do\n"));
return 0; return 0;
default:
DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
return -EINVAL;
} }
/*
* LOADED, UNLOADED, ZOMBIE
*/
if (CTX_IS_MASKED(ctx) == 0) return -EBUSY;
proceed:
/* /*
* In system wide and when the context is loaded, access can only happen * In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session. * when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se. * It does not have to be the owner (ctx_task) of the context per se.
*/ */
if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) { if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu)); DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
return -EBUSY; return -EBUSY;
} }
task = PFM_CTX_TASK(ctx);
/* sanity check */ /* sanity check */
if (unlikely(task == NULL)) { if (unlikely(task == NULL)) {
printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid); printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
return -EINVAL; return -EINVAL;
} }
/* if (task == current || is_system) {
* this test is always true in system wide mode
*/
if (task == current) {
fmt = ctx->ctx_buf_fmt; fmt = ctx->ctx_buf_fmt;
...@@ -3618,25 +3609,23 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3618,25 +3609,23 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
prefetch(ctx->ctx_smpl_hdr); prefetch(ctx->ctx_smpl_hdr);
rst_ctrl.stop_monitoring = 0; rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_NO_RESET; rst_ctrl.bits.reset_ovfl_pmds = 1;
if (is_loaded) if (state == PFM_CTX_LOADED)
ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
else else
ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else { } else {
rst_ctrl.stop_monitoring = 0; rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET; rst_ctrl.bits.reset_ovfl_pmds = 1;
} }
if (ret == 0) { if (ret == 0) {
if (rst_ctrl.reset_pmds) if (rst_ctrl.bits.reset_ovfl_pmds)
pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, rst_ctrl.reset_pmds); pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
if (rst_ctrl.stop_monitoring == 0) { if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring for [%d]\n", task->pid)); DPRINT(("resuming monitoring for [%d]\n", task->pid));
if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task); if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task);
...@@ -3679,7 +3668,6 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3679,7 +3668,6 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
PFM_SET_WORK_PENDING(task, 1); PFM_SET_WORK_PENDING(task, 1);
pfm_set_task_notify(task); pfm_set_task_notify(task);
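The restart paths above now set policy through the .bits fields of pfm_ovfl_ctrl_t, and pfm_overflow_handler() below clears the whole control word with a single ovfl_ctrl.val = 0. A plausible shape for the type, inferred from those two usages (a sketch; the authoritative definition lives in the perfmon headers):

typedef union {
	unsigned int val;			/* clear/copy every control bit at once */
	struct {
		unsigned int notify_user:1;	/* queue an overflow message to user level */
		unsigned int block_task:1;	/* block the monitored task until restart */
		unsigned int mask_monitoring:1;	/* mask monitoring (pfm_mask_monitoring) */
		unsigned int reset_ovfl_pmds:1;	/* reset the overflowed PMDs */
	} bits;
} pfm_ovfl_ctrl_t;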
...@@ -3700,10 +3688,9 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3700,10 +3688,9 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_debug_var = pfm_sysctl.debug; pfm_debug_var = pfm_sysctl.debug;
printk(KERN_ERR "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
if (m==0) { if (m == 0) {
memset(pfm_stats, 0, sizeof(pfm_stats)); memset(pfm_stats, 0, sizeof(pfm_stats));
for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
} }
...@@ -3711,7 +3698,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3711,7 +3698,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
return 0; return 0;
} }
static int static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{ {
...@@ -3919,6 +3905,7 @@ static int ...@@ -3919,6 +3905,7 @@ static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{ {
struct pt_regs *tregs; struct pt_regs *tregs;
struct task_struct *task = PFM_CTX_TASK(ctx);
if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL; if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL;
...@@ -3944,7 +3931,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3944,7 +3931,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* *
* disable dcr pp * disable dcr pp
*/ */
ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP); ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
ia64_srlz_i(); ia64_srlz_i();
/* /*
...@@ -3968,7 +3955,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3968,7 +3955,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* per-task mode * per-task mode
*/ */
if (ctx->ctx_task == current) { if (task == current) {
/* stop monitoring at kernel level */ /* stop monitoring at kernel level */
pfm_clear_psr_up(); pfm_clear_psr_up();
...@@ -3977,7 +3964,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3977,7 +3964,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/ */
ia64_psr(regs)->up = 0; ia64_psr(regs)->up = 0;
} else { } else {
tregs = ia64_task_regs(ctx->ctx_task); tregs = ia64_task_regs(task);
/* /*
* stop monitoring at the user level * stop monitoring at the user level
...@@ -3988,7 +3975,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3988,7 +3975,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* monitoring disabled in kernel at next reschedule * monitoring disabled in kernel at next reschedule
*/ */
ctx->ctx_saved_psr_up = 0; ctx->ctx_saved_psr_up = 0;
printk("pfm_stop: current [%d] task=[%d]\n", current->pid, ctx->ctx_task->pid); DPRINT(("pfm_stop: current [%d] task=[%d]\n", current->pid, task->pid));
} }
return 0; return 0;
} }
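pfm_stop() and pfm_start() also move from the dedicated ia64_get_dcr()/ia64_set_dcr() helpers to the generic ia64_getreg()/ia64_setreg() intrinsics, which select the control register by constant. The read-modify-write idiom in isolation (pfm_dcr_pp() is a hypothetical helper; the constants are the ones used above):

static inline void
pfm_dcr_pp(int on)
{
	unsigned long dcr = ia64_getreg(_IA64_REG_CR_DCR);

	ia64_setreg(_IA64_REG_CR_DCR, on ? (dcr | IA64_DCR_PP) : (dcr & ~IA64_DCR_PP));
	ia64_srlz_i();	/* serialize: subsequent instructions must see the new dcr.pp */
}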
...@@ -4034,7 +4021,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -4034,7 +4021,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_set_psr_pp(); pfm_set_psr_pp();
/* enable dcr pp */ /* enable dcr pp */
ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP); ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i(); ia64_srlz_i();
return 0; return 0;
...@@ -4099,6 +4086,28 @@ pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs ...@@ -4099,6 +4086,28 @@ pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs
return ret; return ret;
} }
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
struct task_struct *g, *t;
int ret = -ESRCH;
read_lock(&tasklist_lock);
do_each_thread (g, t) {
if (t->thread.pfm_context == ctx) {
ret = 0;
break;
}
} while_each_thread (g, t);
read_unlock(&tasklist_lock);
DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
return ret;
}
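One subtlety in pfm_check_task_exist(): do_each_thread()/while_each_thread() conventionally expand to a for loop wrapping a do/while, so the break above only exits the inner per-thread loop and the outer walk over thread groups keeps going; that is harmless here (ret is already 0) but wasteful. A variant that ends the scan outright, sketched under that assumption about the macros (the function name is illustrative):

static int
pfm_check_task_exist_fast(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);
	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			goto out;	/* leaves both nested loops at once */
		}
	} while_each_thread (g, t);
out:
	read_unlock(&tasklist_lock);
	return ret;
}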
static int static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{ {
...@@ -4199,7 +4208,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -4199,7 +4208,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
current->pid, current->pid,
thread->pfm_context, ctx)); thread->pfm_context, ctx));
old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
if (old != NULL) { if (old != NULL) {
DPRINT(("load_pid [%d] already has a context\n", req->load_pid)); DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
goto error_unres; goto error_unres;
...@@ -4309,8 +4318,17 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -4309,8 +4318,17 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/* /*
* release task, there is now a link with the context * release task, there is now a link with the context
*/ */
if (ctx->ctx_fl_system == 0 && task != current) pfm_put_task(task); if (ctx->ctx_fl_system == 0 && task != current) {
pfm_put_task(task);
if (ret == 0) {
ret = pfm_check_task_exist(ctx);
if (ret) {
CTX_UNLOADED(ctx);
ctx->ctx_task = NULL;
}
}
}
return ret; return ret;
} }
...@@ -4327,7 +4345,7 @@ static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx); ...@@ -4327,7 +4345,7 @@ static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
static int static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{ {
struct task_struct *task = ctx->ctx_task; struct task_struct *task = PFM_CTX_TASK(ctx);
struct pt_regs *tregs; struct pt_regs *tregs;
DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1)); DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
...@@ -4409,8 +4427,8 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg ...@@ -4409,8 +4427,8 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
* cancel user level control * cancel user level control
*/ */
ia64_psr(regs)->sp = 1; ia64_psr(regs)->sp = 1;
DPRINT(("setting psr.sp for [%d]\n", task->pid));
DPRINT(("setting psr.sp for [%d]\n", task->pid));
} }
/* /*
* save PMDs to context * save PMDs to context
...@@ -4483,7 +4501,7 @@ pfm_exit_thread(struct task_struct *task) ...@@ -4483,7 +4501,7 @@ pfm_exit_thread(struct task_struct *task)
pfm_context_t *ctx; pfm_context_t *ctx;
unsigned long flags; unsigned long flags;
struct pt_regs *regs = ia64_task_regs(task); struct pt_regs *regs = ia64_task_regs(task);
int ret; int ret, state;
int free_ok = 0; int free_ok = 0;
ctx = PFM_GET_CTX(task); ctx = PFM_GET_CTX(task);
...@@ -4492,16 +4510,16 @@ pfm_exit_thread(struct task_struct *task) ...@@ -4492,16 +4510,16 @@ pfm_exit_thread(struct task_struct *task)
DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid)); DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
state = ctx->ctx_state;
switch(state) {
case PFM_CTX_UNLOADED:
/* /*
* come here only if attached * come here only if attached
*/ */
if (unlikely(CTX_IS_UNLOADED(ctx))) {
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
goto skip_all; break;
} case PFM_CTX_LOADED:
case PFM_CTX_MASKED:
if (CTX_IS_LOADED(ctx) || CTX_IS_MASKED(ctx)) {
ret = pfm_context_unload(ctx, NULL, 0, regs); ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) { if (ret) {
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret); printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
...@@ -4510,8 +4528,8 @@ pfm_exit_thread(struct task_struct *task) ...@@ -4510,8 +4528,8 @@ pfm_exit_thread(struct task_struct *task)
DPRINT(("ctx terminated by [%d]\n", task->pid)); DPRINT(("ctx terminated by [%d]\n", task->pid));
pfm_end_notify_user(ctx); pfm_end_notify_user(ctx);
break;
} else if (CTX_IS_ZOMBIE(ctx)) { case PFM_CTX_ZOMBIE:
pfm_clear_psr_up(); pfm_clear_psr_up();
BUG_ON(ctx->ctx_smpl_hdr); BUG_ON(ctx->ctx_smpl_hdr);
...@@ -4519,11 +4537,15 @@ pfm_exit_thread(struct task_struct *task) ...@@ -4519,11 +4537,15 @@ pfm_exit_thread(struct task_struct *task)
pfm_force_cleanup(ctx, regs); pfm_force_cleanup(ctx, regs);
free_ok = 1; free_ok = 1;
break;
default:
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
break;
} }
{ u64 psr = pfm_get_psr(); { u64 psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(GET_PMU_OWNER());
} }
skip_all:
UNPROTECT_CTX(ctx, flags); UNPROTECT_CTX(ctx, flags);
/* /*
...@@ -4653,7 +4675,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon ...@@ -4653,7 +4675,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
/* /*
* reject any call if perfmon was disabled at initialization time * reject any call if perfmon was disabled at initialization time
*/ */
if (PFM_IS_DISABLED()) return -ENOSYS; if (PFM_IS_DISABLED()) return -ENOSYS;
if (unlikely(PFM_CMD_IS_VALID(cmd) == 0)) { if (unlikely(PFM_CMD_IS_VALID(cmd) == 0)) {
...@@ -4773,6 +4795,8 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon ...@@ -4773,6 +4795,8 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
error_args: error_args:
if (args_k) kfree(args_k); if (args_k) kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
return ret; return ret;
} }
...@@ -4789,22 +4813,22 @@ pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_reg ...@@ -4789,22 +4813,22 @@ pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_reg
*/ */
if (CTX_HAS_SMPL(ctx)) { if (CTX_HAS_SMPL(ctx)) {
rst_ctrl.stop_monitoring = 1; rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_NO_RESET; rst_ctrl.bits.reset_ovfl_pmds = 1;
/* XXX: check return value */ /* XXX: check return value */
if (fmt->fmt_restart) if (fmt->fmt_restart)
ret = (*fmt->fmt_restart)(current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); ret = (*fmt->fmt_restart)(current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else { } else {
rst_ctrl.stop_monitoring = 0; rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET; rst_ctrl.bits.reset_ovfl_pmds = 1;
} }
if (ret == 0) { if (ret == 0) {
if (rst_ctrl.reset_pmds != PFM_PMD_NO_RESET) if (rst_ctrl.bits.reset_ovfl_pmds) {
pfm_reset_regs(ctx, &ovfl_regs, rst_ctrl.reset_pmds); pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
}
if (rst_ctrl.stop_monitoring == 0) { if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring\n")); DPRINT(("resuming monitoring\n"));
if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current); if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current);
} else { } else {
...@@ -4974,11 +4998,12 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) ...@@ -4974,11 +4998,12 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL; msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
msg->pfm_ovfl_msg.msg_active_set = 0; msg->pfm_ovfl_msg.msg_active_set = 0;
msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds; msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
} }
DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n", DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n",
...@@ -5024,9 +5049,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5024,9 +5049,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
pfm_ovfl_arg_t ovfl_arg; pfm_ovfl_arg_t ovfl_arg;
unsigned long mask; unsigned long mask;
unsigned long old_val; unsigned long old_val;
unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL; unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
unsigned long tstamp;
pfm_ovfl_ctrl_t ovfl_ctrl; pfm_ovfl_ctrl_t ovfl_ctrl;
unsigned int i, j, has_smpl, first_pmd = ~0U; unsigned int i, has_smpl;
int must_notify = 0; int must_notify = 0;
if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring; if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring;
...@@ -5036,9 +5062,11 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5036,9 +5062,11 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
*/ */
if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check; if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
tstamp = ia64_get_itc();
mask = pmc0 >> PMU_FIRST_COUNTER; mask = pmc0 >> PMU_FIRST_COUNTER;
DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s" DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
"used_pmds=0x%lx reload_pmcs=0x%lx\n", "used_pmds=0x%lx reload_pmcs=0x%lx\n",
pmc0, pmc0,
task ? task->pid: -1, task ? task->pid: -1,
...@@ -5074,91 +5102,132 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5074,91 +5102,132 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
* check for overflow condition * check for overflow condition
*/ */
if (likely(old_val > ctx->ctx_pmds[i].val)) { if (likely(old_val > ctx->ctx_pmds[i].val)) {
ovfl_pmds |= 1UL << i; ovfl_pmds |= 1UL << i;
/*
* keep track of pmds of interest for samples
*/
if (has_smpl) {
if (first_pmd == ~0U) first_pmd = i;
smpl_pmds |= ctx->ctx_pmds[i].smpl_pmds[0];
}
if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
} }
DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx first_pmd=%u smpl_pmds=0x%lx\n", DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx "
"ovfl_notify=0x%lx\n",
i, ctx->ctx_pmds[i].val, old_val, i, ctx->ctx_pmds[i].val, old_val,
ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify, first_pmd, smpl_pmds)); ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify));
} }
ovfl_ctrl.notify_user = ovfl_notify ? 1 : 0; /*
ovfl_ctrl.reset_pmds = ovfl_pmds && ovfl_notify == 0UL ? 1 : 0; * there was no 64-bit overflow, nothing else to do
ovfl_ctrl.block = ovfl_notify ? 1 : 0; */
ovfl_ctrl.stop_monitoring = ovfl_notify ? 1 : 0; if (ovfl_pmds == 0UL) return;
/*
* reset all control bits
*/
ovfl_ctrl.val = 0;
/* /*
* when an overflow is detected, check for sampling buffer, if present, invoke * if a sampling format module exists, then we "cache" the overflow by
* record() callback. * calling the module's handler() routine.
*/ */
if (ovfl_pmds && has_smpl) { if (has_smpl) {
unsigned long start_cycles; unsigned long start_cycles, end_cycles;
unsigned long pmd_mask, smpl_pmds;
int j, k, ret = 0;
int this_cpu = smp_processor_id(); int this_cpu = smp_processor_id();
ovfl_arg.ovfl_pmds[0] = ovfl_pmds; pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
ovfl_arg.ovfl_notify[0] = ovfl_notify;
ovfl_arg.ovfl_ctrl = ovfl_ctrl;
ovfl_arg.smpl_pmds[0] = smpl_pmds;
prefetch(ctx->ctx_smpl_hdr); prefetch(ctx->ctx_smpl_hdr);
ovfl_arg.pmd_value = ctx->ctx_pmds[first_pmd].val; for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
ovfl_arg.pmd_last_reset = ctx->ctx_pmds[first_pmd].lval;
ovfl_arg.pmd_eventid = ctx->ctx_pmds[first_pmd].eventid; mask = 1UL << i;
if ((pmd_mask & 0x1) == 0) continue;
ovfl_arg.ovfl_pmd = (unsigned char )i;
ovfl_arg.ovfl_notify = ovfl_notify & mask ? 1 : 0;
ovfl_arg.active_set = 0;
ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */
ovfl_arg.smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
ovfl_arg.pmd_value = ctx->ctx_pmds[i].val;
ovfl_arg.pmd_last_reset = ctx->ctx_pmds[i].lval;
ovfl_arg.pmd_eventid = ctx->ctx_pmds[i].eventid;
/* /*
* copy values of pmds of interest. Sampling format may copy them * copy values of pmds of interest. Sampling format may copy them
* into sampling buffer. * into sampling buffer.
*/ */
if (smpl_pmds) { if (smpl_pmds) {
for(i=0, j=0; smpl_pmds; i++, smpl_pmds >>=1) { for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
if ((smpl_pmds & 0x1) == 0) continue; if ((smpl_pmds & 0x1) == 0) continue;
ovfl_arg.smpl_pmds_values[j++] = PMD_IS_COUNTING(i) ? pfm_read_soft_counter(ctx, i) : ia64_get_pmd(i); ovfl_arg.smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
} }
} }
pfm_stats[this_cpu].pfm_smpl_handler_calls++; pfm_stats[this_cpu].pfm_smpl_handler_calls++;
start_cycles = ia64_get_itc(); start_cycles = ia64_get_itc();
/* /*
* call custom buffer format record (handler) routine * call custom buffer format record (handler) routine
*/ */
(*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs); ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs, tstamp);
end_cycles = ia64_get_itc();
pfm_stats[this_cpu].pfm_smpl_handler_cycles += ia64_get_itc() - start_cycles; /*
* For those controls, we take the union because they have
* an all or nothing behavior.
*/
ovfl_ctrl.bits.notify_user |= ovfl_arg.ovfl_ctrl.bits.notify_user;
ovfl_ctrl.bits.block_task |= ovfl_arg.ovfl_ctrl.bits.block_task;
ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring;
ovfl_ctrl.bits.reset_ovfl_pmds |= ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds; /* yes or no */
ovfl_pmds = ovfl_arg.ovfl_pmds[0]; pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
ovfl_notify = ovfl_arg.ovfl_notify[0]; }
ovfl_ctrl = ovfl_arg.ovfl_ctrl; /*
* when the module cannot handle the rest of the overflows, we abort right here
*/
if (ret && pmd_mask) {
DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n",
current->pid,
pmd_mask<<PMU_FIRST_COUNTER));
}
} else {
/*
* when no sampling module is used, then the default
* is to notify on overflow if requested by user
*/
ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
} }
if (ovfl_pmds && ovfl_ctrl.reset_pmds) { /*
pfm_reset_regs(ctx, &ovfl_pmds, ovfl_ctrl.reset_pmds); * if we (still) have some overflowed PMD but no notification is requested
* then we use the short reset period.
*/
if (ovfl_ctrl.bits.reset_ovfl_pmds) {
unsigned long bm = ovfl_pmds;
pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
} }
if (ovfl_notify && ovfl_ctrl.notify_user) { if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
/* /*
* keep track of what to reset when unblocking * keep track of what to reset when unblocking
*/ */
ctx->ctx_ovfl_regs[0] = ovfl_pmds; ctx->ctx_ovfl_regs[0] = ovfl_pmds;
if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.block) { /*
* check for blocking context
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
/* /*
* set the perfmon specific checking pending work * set the perfmon specific checking pending work for the task
*/ */
PFM_SET_WORK_PENDING(task, 1); PFM_SET_WORK_PENDING(task, 1);
...@@ -5175,21 +5244,22 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5175,21 +5244,22 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
must_notify = 1; must_notify = 1;
} }
DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx stopped=%d\n", DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
current->pid, current->pid,
GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
PFM_GET_WORK_PENDING(task), PFM_GET_WORK_PENDING(task),
ctx->ctx_fl_trap_reason, ctx->ctx_fl_trap_reason,
ovfl_pmds, ovfl_pmds,
ovfl_notify, ovfl_notify,
ovfl_ctrl.stop_monitoring ? 1 : 0)); ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
/* /*
* in case monitoring must be stopped, we toggle the psr bits * in case monitoring must be stopped, we toggle the psr bits
*/ */
if (ovfl_ctrl.stop_monitoring) { if (ovfl_ctrl.bits.mask_monitoring) {
pfm_mask_monitoring(task); pfm_mask_monitoring(task);
CTX_MASKED(ctx); CTX_MASKED(ctx);
} }
/* /*
* send notification now * send notification now
*/ */
...@@ -5197,7 +5267,6 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5197,7 +5267,6 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
return; return;
sanity_check: sanity_check:
printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
smp_processor_id(), smp_processor_id(),
...@@ -5305,7 +5374,7 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs) ...@@ -5305,7 +5374,7 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
static pfm_irq_handler_t static pfm_irq_handler_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{ {
unsigned long m; unsigned long start_cycles, total_cycles;
unsigned long min, max; unsigned long min, max;
int this_cpu; int this_cpu;
int ret; int ret;
...@@ -5314,19 +5383,22 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) ...@@ -5314,19 +5383,22 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min; min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max; max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
m = ia64_get_itc(); start_cycles = ia64_get_itc();
ret = pfm_do_interrupt_handler(irq, arg, regs); ret = pfm_do_interrupt_handler(irq, arg, regs);
m = ia64_get_itc() - m; total_cycles = ia64_get_itc();
/* /*
* don't measure spurious interrupts * don't measure spurious interrupts
*/ */
if (ret == 0) { if (likely(ret == 0)) {
if (m < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = m; total_cycles -= start_cycles;
if (m > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = m;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += m; if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
} }
PFM_IRQ_HANDLER_RET(); PFM_IRQ_HANDLER_RET();
} }
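The interrupt path above now reads the ITC twice into named variables and folds the delta into the per-CPU statistics only when the interrupt was genuine. The bookkeeping in isolation (pfm_track_cycles() is a hypothetical helper; the running minimum only works because pfm_debug() above seeds pfm_ovfl_intr_cycles_min with ~0UL):

static inline void
pfm_track_cycles(unsigned long cycles, unsigned long *min, unsigned long *max,
		 unsigned long *sum)
{
	if (cycles < *min) *min = cycles;	/* *min must start at ~0UL */
	if (cycles > *max) *max = cycles;
	*sum += cycles;
}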
...@@ -5459,13 +5531,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i ...@@ -5459,13 +5531,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
* if monitoring has started * if monitoring has started
*/ */
if (dcr_pp) { if (dcr_pp) {
dcr = ia64_get_dcr(); dcr = ia64_getreg(_IA64_REG_CR_DCR);
/* /*
* context switching in? * context switching in?
*/ */
if (is_ctxswin) { if (is_ctxswin) {
/* mask monitoring for the idle task */ /* mask monitoring for the idle task */
ia64_set_dcr(dcr & ~IA64_DCR_PP); ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
pfm_clear_psr_pp(); pfm_clear_psr_pp();
ia64_srlz_i(); ia64_srlz_i();
return; return;
...@@ -5477,10 +5549,14 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i ...@@ -5477,10 +5549,14 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
* Due to inlining this odd if-then-else construction generates * Due to inlining this odd if-then-else construction generates
* better code. * better code.
*/ */
ia64_set_dcr(dcr |IA64_DCR_PP); ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
pfm_set_psr_pp(); pfm_set_psr_pp();
ia64_srlz_i(); ia64_srlz_i();
} }
{ unsigned long val;
val = ia64_get_pmc(4);
if ((val & (1UL<<23)) == 0UL) printk("perfmon: PMU off: pmc4=0x%lx\n", val);
}
} }
void void
...@@ -5743,13 +5819,6 @@ pfm_load_regs (struct task_struct *task) ...@@ -5743,13 +5819,6 @@ pfm_load_regs (struct task_struct *task)
BUG_ON(GET_PMU_OWNER()); BUG_ON(GET_PMU_OWNER());
t = &task->thread; t = &task->thread;
psr = pfm_get_psr();
#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
#endif
/* /*
* possible on unload * possible on unload
*/ */
...@@ -5764,6 +5833,12 @@ pfm_load_regs (struct task_struct *task) ...@@ -5764,6 +5833,12 @@ pfm_load_regs (struct task_struct *task)
* access, not CPU concurrency. * access, not CPU concurrency.
*/ */
flags = pfm_protect_ctx_ctxsw(ctx); flags = pfm_protect_ctx_ctxsw(ctx);
psr = pfm_get_psr();
#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
#endif
if (unlikely(CTX_IS_ZOMBIE(ctx))) { if (unlikely(CTX_IS_ZOMBIE(ctx))) {
struct pt_regs *regs = ia64_task_regs(task); struct pt_regs *regs = ia64_task_regs(task);
...@@ -6126,6 +6201,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) ...@@ -6126,6 +6201,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val)); DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));
if (is_self) task->thread.pmds[i] = pmd_val; if (is_self) task->thread.pmds[i] = pmd_val;
ctx->ctx_pmds[i].val = val; ctx->ctx_pmds[i].val = val;
} }
} }
...@@ -6257,7 +6333,7 @@ pfm_init_percpu (void) ...@@ -6257,7 +6333,7 @@ pfm_init_percpu (void)
if (smp_processor_id() == 0) if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR); ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
ia64_srlz_d(); ia64_srlz_d();
/* /*
......
...@@ -109,21 +109,15 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v ...@@ -109,21 +109,15 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v
} }
static int static int
default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs) default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp)
{ {
pfm_default_smpl_hdr_t *hdr; pfm_default_smpl_hdr_t *hdr;
pfm_default_smpl_entry_t *ent; pfm_default_smpl_entry_t *ent;
void *cur, *last; void *cur, *last;
unsigned long *e; unsigned long *e;
unsigned long ovfl_mask;
unsigned long ovfl_notify;
unsigned long stamp;
unsigned int npmds, i; unsigned int npmds, i;
unsigned char ovfl_pmd;
/* unsigned char ovfl_notify;
* some time stamp
*/
stamp = ia64_get_itc();
if (unlikely(buf == NULL || arg == NULL || regs == NULL || task == NULL)) { if (unlikely(buf == NULL || arg == NULL || regs == NULL || task == NULL)) {
DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg)); DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
...@@ -133,8 +127,8 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct ...@@ -133,8 +127,8 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr = (pfm_default_smpl_hdr_t *)buf; hdr = (pfm_default_smpl_hdr_t *)buf;
cur = hdr->hdr_cur_pos; cur = hdr->hdr_cur_pos;
last = hdr->hdr_last_pos; last = hdr->hdr_last_pos;
ovfl_mask = arg->ovfl_pmds[0]; ovfl_pmd = arg->ovfl_pmd;
ovfl_notify = arg->ovfl_notify[0]; ovfl_notify = arg->ovfl_notify;
/* /*
* check for space against largest possible entry.
...@@ -153,12 +147,12 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct ...@@ -153,12 +147,12 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr->hdr_count++; hdr->hdr_count++;
DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmds=0x%lx ovfl_notify=0x%lx npmds=%u\n", DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n",
task->pid, task->pid,
hdr->hdr_count, hdr->hdr_count,
cur, last, cur, last,
last-cur, last-cur,
ovfl_mask, ovfl_pmd,
ovfl_notify, npmds)); ovfl_notify, npmds));
/* /*
...@@ -172,7 +166,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct ...@@ -172,7 +166,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
* - this is not necessarily the task controlling the session * - this is not necessarily the task controlling the session
*/ */
ent->pid = current->pid; ent->pid = current->pid;
ent->cpu = smp_processor_id(); ent->ovfl_pmd = ovfl_pmd;
ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val; ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
/* /*
...@@ -180,13 +174,9 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct ...@@ -180,13 +174,9 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
*/ */
ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3); ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
/*
* which registers overflowed
*/
ent->ovfl_pmds = ovfl_mask;
ent->tstamp = stamp; ent->tstamp = stamp;
ent->cpu = smp_processor_id();
ent->set = arg->active_set; ent->set = arg->active_set;
ent->reserved1 = 0;
/* /*
* selectively store PMDs in increasing index number * selectively store PMDs in increasing index number
...@@ -206,14 +196,14 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct ...@@ -206,14 +196,14 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
/* /*
* keep same ovfl_pmds, ovfl_notify * keep same ovfl_pmds, ovfl_notify
*/ */
arg->ovfl_ctrl.notify_user = 0; arg->ovfl_ctrl.bits.notify_user = 0;
arg->ovfl_ctrl.block = 0; arg->ovfl_ctrl.bits.block_task = 0;
arg->ovfl_ctrl.stop_monitoring = 0; arg->ovfl_ctrl.bits.mask_monitoring = 0;
arg->ovfl_ctrl.reset_pmds = 1; arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */
return 0; return 0;
full: full:
DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=0x%lx\n", last-cur, hdr->hdr_count, ovfl_notify)); DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify));
/* /*
* increment number of buffer overflow. * increment number of buffer overflow.
...@@ -222,22 +212,21 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct ...@@ -222,22 +212,21 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr->hdr_overflows++; hdr->hdr_overflows++;
/* /*
* if no notification is needed, then we just reset the buffer index. * if no notification is needed, then we saturate the buffer
*/ */
if (ovfl_notify == 0UL) { if (ovfl_notify == 0) {
hdr->hdr_count = 0UL; hdr->hdr_count = 0UL;
arg->ovfl_ctrl.notify_user = 0; arg->ovfl_ctrl.bits.notify_user = 0;
arg->ovfl_ctrl.block = 0; arg->ovfl_ctrl.bits.block_task = 0;
arg->ovfl_ctrl.stop_monitoring = 0; arg->ovfl_ctrl.bits.mask_monitoring = 1;
arg->ovfl_ctrl.reset_pmds = 1; arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0;
} else { } else {
/* keep same ovfl_pmds, ovfl_notify */ arg->ovfl_ctrl.bits.notify_user = 1;
arg->ovfl_ctrl.notify_user = 1; arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */
arg->ovfl_ctrl.block = 1; arg->ovfl_ctrl.bits.mask_monitoring = 1;
arg->ovfl_ctrl.stop_monitoring = 1; arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */
arg->ovfl_ctrl.reset_pmds = 0;
} }
return 0; return -1; /* we are full, sorry */
} }
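Under the new per-counter convention, a format handler runs once per overflowed PMD, must fill in every ovfl_ctrl field itself (pfm_overflow_handler() zeroes them and ORs the results together), and may return non-zero to make the caller abandon the remaining overflows, which is exactly what the -1 above does when the buffer is full. A minimal conforming handler, as a sketch using only field names that appear in this diff:

static int
minimal_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
		struct pt_regs *regs, unsigned long stamp)
{
	/* record arg->ovfl_pmd, arg->pmd_value, stamp, ... into buf here */

	arg->ovfl_ctrl.bits.notify_user     = 0;
	arg->ovfl_ctrl.bits.block_task      = 0;
	arg->ovfl_ctrl.bits.mask_monitoring = 0;
	arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;	/* reset before leaving the interrupt */

	return 0;	/* non-zero aborts the remaining overflowed PMDs */
}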
static int static int
...@@ -250,8 +239,8 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru ...@@ -250,8 +239,8 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru
hdr->hdr_count = 0UL; hdr->hdr_count = 0UL;
hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr); hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
ctrl->stop_monitoring = 0; ctrl->bits.mask_monitoring = 0;
ctrl->reset_pmds = PFM_PMD_LONG_RESET; ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
return 0; return 0;
} }
...@@ -272,6 +261,7 @@ static pfm_buffer_fmt_t default_fmt={ ...@@ -272,6 +261,7 @@ static pfm_buffer_fmt_t default_fmt={
.fmt_init = default_init, .fmt_init = default_init,
.fmt_handler = default_handler, .fmt_handler = default_handler,
.fmt_restart = default_restart, .fmt_restart = default_restart,
.fmt_restart_active = default_restart,
.fmt_exit = default_exit, .fmt_exit = default_exit,
}; };
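For completeness, a descriptor like default_fmt is made visible to perfmon by registering it at init time. A sketch, assuming a pfm_register_buffer_fmt() entry point in perfmon.c (the registration call and the init-function name are assumptions):

static int __init
pfm_default_smpl_init(void)
{
	return pfm_register_buffer_fmt(&default_fmt);
}
module_init(pfm_default_smpl_init);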
......
...@@ -741,8 +741,8 @@ cpu_init (void) ...@@ -741,8 +741,8 @@ cpu_init (void)
* shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
* be fine). * be fine).
*/ */
ia64_set_dcr( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC); | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count); atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm; current->active_mm = &init_mm;
if (current->mm) if (current->mm)
...@@ -758,11 +758,11 @@ cpu_init (void) ...@@ -758,11 +758,11 @@ cpu_init (void)
ia64_set_itv(1 << 16); ia64_set_itv(1 << 16);
ia64_set_lrr0(1 << 16); ia64_set_lrr0(1 << 16);
ia64_set_lrr1(1 << 16); ia64_set_lrr1(1 << 16);
ia64_set_pmv(1 << 16); ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
ia64_set_cmcv(1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
/* clear TPR & XTP to enable all interrupt classes: */ /* clear TPR & XTP to enable all interrupt classes: */
ia64_set_tpr(0); ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
normal_xtp(); normal_xtp();
#endif #endif
......
/* /*
* Architecture-specific signal handling support. * Architecture-specific signal handling support.
* *
* Copyright (C) 1999-2002 Hewlett-Packard Co * Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* *
* Derived from i386 and Alpha versions. * Derived from i386 and Alpha versions.
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/wait.h> #include <linux/wait.h>
#include <asm/ia32.h> #include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/rse.h> #include <asm/rse.h>
#include <asm/sigcontext.h> #include <asm/sigcontext.h>
...@@ -41,6 +42,12 @@ ...@@ -41,6 +42,12 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif #endif
#ifdef ASM_SUPPORTED
/*
 * Don't let GCC use f16-f31 so that when we set up/restore the registers in the signal
 * context in __kernel_sigtramp(), we can be sure that registers f16-f31 contain user-level
* values.
*/
register double f16 asm ("f16"); register double f17 asm ("f17"); register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19"); register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21"); register double f20 asm ("f20"); register double f21 asm ("f21");
...@@ -50,6 +57,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25"); ...@@ -50,6 +57,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27"); register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29"); register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31"); register double f30 asm ("f30"); register double f31 asm ("f31");
#endif
long long
ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr) ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
...@@ -192,7 +200,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) ...@@ -192,7 +200,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
case __SI_TIMER >> 16: case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value); err |= __put_user(from->si_ptr, &to->si_ptr);
break; break;
case __SI_CHLD >> 16: case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_utime, &to->si_utime);
...@@ -592,10 +600,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ...@@ -592,10 +600,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
if (IS_IA32_PROCESS(&scr->pt)) { if (IS_IA32_PROCESS(&scr->pt)) {
scr->pt.r8 = scr->pt.r1; scr->pt.r8 = scr->pt.r1;
scr->pt.cr_iip -= 2; scr->pt.cr_iip -= 2;
if (errno == ERESTART_RESTARTBLOCK) { if (errno == ERESTART_RESTARTBLOCK)
scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */ scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */
scr->pt.cr_iip -= 2;
}
} else { } else {
/* /*
* Note: the syscall number is in r15 which is saved in * Note: the syscall number is in r15 which is saved in
......
...@@ -7,6 +7,20 @@ ...@@ -7,6 +7,20 @@
* 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
*/ */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <asm/fpswa.h>
#include <asm/hardirq.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
/* /*
* fp_emulate() needs to be able to access and update all floating point registers. Those * fp_emulate() needs to be able to access and update all floating point registers. Those
* saved in pt_regs can be accessed through that structure, but those not saved, will be * saved in pt_regs can be accessed through that structure, but those not saved, will be
...@@ -15,6 +29,7 @@ ...@@ -15,6 +29,7 @@
* by declaring preserved registers that are not marked as "fixed" as global register * by declaring preserved registers that are not marked as "fixed" as global register
* variables. * variables.
*/ */
#ifdef ASM_SUPPORTED
register double f2 asm ("f2"); register double f3 asm ("f3"); register double f2 asm ("f2"); register double f3 asm ("f3");
register double f4 asm ("f4"); register double f5 asm ("f5"); register double f4 asm ("f4"); register double f5 asm ("f5");
...@@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25"); ...@@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27"); register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29"); register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31"); register double f30 asm ("f30"); register double f31 asm ("f31");
#endif
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <asm/hardirq.h>
#include <asm/ia32.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/fpswa.h>
extern spinlock_t timerlist_lock; extern spinlock_t timerlist_lock;
...@@ -357,6 +359,10 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -357,6 +359,10 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri); siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x11) { if (isr & 0x11) {
siginfo.si_code = FPE_FLTINV; siginfo.si_code = FPE_FLTINV;
} else if (isr & 0x22) {
/* denormal operand gets the same si_code as underflow
* see arch/i386/kernel/traps.c:math_error() */
siginfo.si_code = FPE_FLTUND;
} else if (isr & 0x44) { } else if (isr & 0x44) {
siginfo.si_code = FPE_FLTDIV; siginfo.si_code = FPE_FLTDIV;
} }
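The decode above, as a table (a sketch; the doubled masks presumably cover the fault and trap encodings of each exception, per the comments in this hunk):

static int
fp_si_code(unsigned long isr)
{
	if (isr & 0x11) return FPE_FLTINV;	/* invalid operation */
	if (isr & 0x22) return FPE_FLTUND;	/* denormal operand, same si_code as underflow */
	if (isr & 0x44) return FPE_FLTDIV;	/* divide by zero */
	return 0;				/* keep the caller's default */
}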
......
...@@ -18,9 +18,10 @@ ...@@ -18,9 +18,10 @@
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/tty.h> #include <linux/tty.h>
#include <asm/uaccess.h> #include <asm/intrinsics.h>
#include <asm/rse.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
...@@ -231,7 +232,7 @@ static u16 fr_info[32]={ ...@@ -231,7 +232,7 @@ static u16 fr_info[32]={
static void static void
invala_gr (int regno) invala_gr (int regno)
{ {
# define F(reg) case reg: __asm__ __volatile__ ("invala.e r%0" :: "i"(reg)); break # define F(reg) case reg: ia64_invala_gr(reg); break
switch (regno) { switch (regno) {
F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7); F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
...@@ -258,7 +259,7 @@ invala_gr (int regno) ...@@ -258,7 +259,7 @@ invala_gr (int regno)
static void static void
invala_fr (int regno) invala_fr (int regno)
{ {
# define F(reg) case reg: __asm__ __volatile__ ("invala.e f%0" :: "i"(reg)); break # define F(reg) case reg: ia64_invala_fr(reg); break
switch (regno) { switch (regno) {
F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7); F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
...@@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs) ...@@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
static inline void static inline void
float_spill_f0 (struct ia64_fpreg *final) float_spill_f0 (struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("stf.spill [%0]=f0" :: "r"(final) : "memory"); ia64_stf_spill(final, 0);
} }
static inline void static inline void
float_spill_f1 (struct ia64_fpreg *final) float_spill_f1 (struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("stf.spill [%0]=f1" :: "r"(final) : "memory"); ia64_stf_spill(final, 1);
} }
static void static void
...@@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={ ...@@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={
static inline void static inline void
mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final) mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldfe f6=[%0];; stf.spill [%1]=f6" ia64_ldfe(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stf_spill(final, 6);
} }
static inline void static inline void
mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final) mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldf8 f6=[%0];; stf.spill [%1]=f6" ia64_ldf8(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stf_spill(final, 6);
} }
static inline void static inline void
mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final) mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldfs f6=[%0];; stf.spill [%1]=f6" ia64_ldfs(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stf_spill(final, 6);
} }
static inline void static inline void
mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final) mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldfd f6=[%0];; stf.spill [%1]=f6" ia64_ldfd(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stf_spill(final, 6);
} }
static inline void static inline void
float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final) float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldf.fill f6=[%0];; stfe [%1]=f6" ia64_ldf_fill(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stfe(final, 6);
} }
static inline void static inline void
float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final) float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldf.fill f6=[%0];; stf8 [%1]=f6" ia64_ldf_fill(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stf8(final, 6);
} }
static inline void static inline void
float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final) float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldf.fill f6=[%0];; stfs [%1]=f6" ia64_ldf_fill(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stfs(final, 6);
} }
static inline void static inline void
float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final) float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{ {
__asm__ __volatile__ ("ldf.fill f6=[%0];; stfd [%1]=f6" ia64_ldf_fill(6, init);
:: "r"(init), "r"(final) : "f6","memory"); ia64_stop();
ia64_stfd(final, 6);
} }
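All eight converters above share one shape: the old asm put ";;" between the load and the spill, and with intrinsics that instruction-group boundary is spelled ia64_stop(). The shape in isolation (fpreg_convert() is hypothetical; the intrinsics are the ones used above):

static inline void
fpreg_convert(struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldfd(6, init);		/* load f6 in the memory format */
	ia64_stop();			/* ";;": the spill below must see the new f6 */
	ia64_stf_spill(final, 6);	/* store f6 back in register (spill) format */
}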
static int static int
......
...@@ -35,6 +35,7 @@ SECTIONS ...@@ -35,6 +35,7 @@ SECTIONS
{ {
*(.text.ivt) *(.text.ivt)
*(.text) *(.text)
*(.gnu.linkonce.t*)
} }
.text2 : AT(ADDR(.text2) - LOAD_OFFSET) .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) } { *(.text2) }
...@@ -183,7 +184,7 @@ SECTIONS ...@@ -183,7 +184,7 @@ SECTIONS
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
.data : AT(ADDR(.data) - LOAD_OFFSET) .data : AT(ADDR(.data) - LOAD_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } { *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16); . = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */ __gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
...@@ -194,7 +195,7 @@ SECTIONS ...@@ -194,7 +195,7 @@ SECTIONS
can access them all, and initialized data all before uninitialized, so can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */ we can shorten the on-disk segment size. */
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) } { *(.sdata) *(.sdata1) *(.srdata) }
_edata = .; _edata = .;
_bss = .; _bss = .;
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
......
...@@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi ...@@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi
/* /*
* Flush ALAT entries also. * Flush ALAT entries also.
*/ */
asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) ia64_ptcga(start, (nbits<<2));
: "memory"); ia64_srlz_i();
start += (1UL << nbits); start += (1UL << nbits);
} while (start < end); } while (start < end);
} }
...@@ -118,15 +118,13 @@ local_flush_tlb_all (void) ...@@ -118,15 +118,13 @@ local_flush_tlb_all (void)
local_irq_save(flags); local_irq_save(flags);
for (i = 0; i < count0; ++i) { for (i = 0; i < count0; ++i) {
for (j = 0; j < count1; ++j) { for (j = 0; j < count1; ++j) {
asm volatile ("ptc.e %0" :: "r"(addr)); ia64_ptce(addr);
addr += stride1; addr += stride1;
} }
addr += stride0; addr += stride0;
} }
local_irq_restore(flags); local_irq_restore(flags);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */ ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
} }
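The purge loop above walks a two-level grid whose base, counts and strides come from the PAL_PTCE_INFO call. The same walk in nested-for form (a sketch; the local_cpu_data field names are assumptions, and the irq save/restore of the original is omitted):

static void
local_flush_tlb_all_sketch(void)
{
	unsigned long i, j;
	unsigned long addr    = local_cpu_data->ptce_base;
	unsigned long count0  = local_cpu_data->ptce_count[0];
	unsigned long count1  = local_cpu_data->ptce_count[1];
	unsigned long stride0 = local_cpu_data->ptce_stride[0];
	unsigned long stride1 = local_cpu_data->ptce_stride[1];

	for (i = 0; i < count0; i++, addr += stride0)
		for (j = 0; j < count1; j++, addr += stride1)
			ia64_ptce(addr);	/* purge one chunk of the local TLB */
	ia64_srlz_i();				/* srlz.i implies srlz.d */
}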
void void
...@@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long ...@@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
platform_global_tlb_purge(start, end, nbits); platform_global_tlb_purge(start, end, nbits);
# else # else
do { do {
asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory"); ia64_ptcl(start, (nbits<<2));
start += (1UL << nbits); start += (1UL << nbits);
} while (start < end); } while (start < end);
# endif # endif
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */ ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
} }
void __init void __init
......
...@@ -200,7 +200,7 @@ efi_unimplemented (void) ...@@ -200,7 +200,7 @@ efi_unimplemented (void)
#ifdef SGI_SN2 #ifdef SGI_SN2
#undef cpu_physical_id #undef cpu_physical_id
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff) #define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
void void
fprom_send_cpei(void) { fprom_send_cpei(void) {
...@@ -224,14 +224,14 @@ fprom_send_cpei(void) { ...@@ -224,14 +224,14 @@ fprom_send_cpei(void) {
#endif #endif
static long static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2, sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7) unsigned long in6, unsigned long in7)
{ {
register long r9 asm ("r9") = 0; long r9 = 0;
register long r10 asm ("r10") = 0; long r10 = 0;
register long r11 asm ("r11") = 0; long r11 = 0;
long status; long status;
/* /*
...@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2, ...@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
} }
asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11)); asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
return status; return ((struct sal_ret_values) {status, r9, r10, r11});
} }
......
...@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) { ...@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) {
irr_bit = irq_to_vector(irq) % 64; irr_bit = irq_to_vector(irq) % 64;
switch (irr_reg_num) { switch (irr_reg_num) {
case 0: case 0:
irr_reg = ia64_get_irr0(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
break; break;
case 1: case 1:
irr_reg = ia64_get_irr1(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
break; break;
case 2: case 2:
irr_reg = ia64_get_irr2(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
break; break;
case 3: case 3:
irr_reg = ia64_get_irr3(); irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
break; break;
} }
if (!test_bit(irr_bit, &irr_reg) ) { if (!test_bit(irr_bit, &irr_reg) ) {
...@@ -354,9 +354,9 @@ sn_get_next_bit(void) { ...@@ -354,9 +354,9 @@ sn_get_next_bit(void) {
void void
sn_set_tpr(int vector) { sn_set_tpr(int vector) {
if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) { if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
ia64_set_tpr(vector); ia64_setreg(_IA64_REG_CR_TPR, vector);
} else { } else {
ia64_set_tpr(IA64_LAST_DEVICE_VECTOR); ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR);
} }
} }
......
...@@ -395,7 +395,7 @@ sn_cpu_init(void) ...@@ -395,7 +395,7 @@ sn_cpu_init(void)
return; return;
cpuid = smp_processor_id(); cpuid = smp_processor_id();
cpuphyid = ((ia64_get_lid() >> 16) & 0xffff); cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff);
nasid = cpu_physical_id_to_nasid(cpuphyid); nasid = cpu_physical_id_to_nasid(cpuphyid);
cnode = nasid_to_cnodeid(nasid); cnode = nasid_to_cnodeid(nasid);
slice = cpu_physical_id_to_slice(cpuphyid); slice = cpu_physical_id_to_slice(cpuphyid);
......
...@@ -11,81 +11,73 @@ ...@@ -11,81 +11,73 @@
#include <asm/sn/sn2/io.h> #include <asm/sn/sn2/io.h>
#undef __sn_inb
#undef __sn_inw
#undef __sn_inl
#undef __sn_outb
#undef __sn_outw
#undef __sn_outl
#undef __sn_readb
#undef __sn_readw
#undef __sn_readl
#undef __sn_readq
unsigned int unsigned int
sn_inb (unsigned long port) __sn_inb (unsigned long port)
{ {
return __sn_inb(port); return ___sn_inb(port);
} }
unsigned int unsigned int
sn_inw (unsigned long port) __sn_inw (unsigned long port)
{ {
return __sn_inw(port); return ___sn_inw(port);
} }
unsigned int unsigned int
sn_inl (unsigned long port) __sn_inl (unsigned long port)
{ {
return __sn_inl(port); return ___sn_inl(port);
} }
void void
sn_outb (unsigned char val, unsigned long port) __sn_outb (unsigned char val, unsigned long port)
{ {
__sn_outb(val, port); ___sn_outb(val, port);
} }
void void
sn_outw (unsigned short val, unsigned long port) __sn_outw (unsigned short val, unsigned long port)
{ {
__sn_outw(val, port); ___sn_outw(val, port);
} }
void void
sn_outl (unsigned int val, unsigned long port) __sn_outl (unsigned int val, unsigned long port)
{ {
__sn_outl(val, port); ___sn_outl(val, port);
} }
unsigned char unsigned char
sn_readb (void *addr) __sn_readb (void *addr)
{ {
return __sn_readb (addr); return ___sn_readb (addr);
} }
unsigned short unsigned short
sn_readw (void *addr) __sn_readw (void *addr)
{ {
return __sn_readw (addr); return ___sn_readw (addr);
} }
unsigned int unsigned int
sn_readl (void *addr) __sn_readl (void *addr)
{ {
return __sn_readl (addr); return ___sn_readl (addr);
} }
unsigned long unsigned long
sn_readq (void *addr) __sn_readq (void *addr)
{ {
return __sn_readq (addr); return ___sn_readq (addr);
} }
/* define aliases: */
asm (".global __sn_inb, __sn_inw, __sn_inl");
asm ("__sn_inb = sn_inb");
asm ("__sn_inw = sn_inw");
asm ("__sn_inl = sn_inl");
asm (".global __sn_outb, __sn_outw, __sn_outl");
asm ("__sn_outb = sn_outb");
asm ("__sn_outw = sn_outw");
asm ("__sn_outl = sn_outl");
asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
asm ("__sn_readb = sn_readb");
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
...@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v) ...@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old + i; new = old + i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
...@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v) ...@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old + i; new = old + i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
...@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old - i; new = old - i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
...@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) ...@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v); CMPXCHG_BUGCHECK(v);
old = atomic_read(v); old = atomic_read(v);
new = old - i; new = old - i;
} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new; return new;
} }
......
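All four loops above share one compare-and-swap retry shape; a minimal sketch of that shape, assuming the cmpxchg_acq() defined later in this series and the usual counter member of atomic_t (my_atomic_or is hypothetical):

static inline int
my_atomic_or (int i, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);	/* snapshot the counter      */
		new = old | i;		/* compute the desired value */
		/* retry if another CPU changed *v since the snapshot */
	} while (cmpxchg_acq(&v->counter, old, new) != old);
	return new;
}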
...@@ -292,7 +292,7 @@ ffz (unsigned long x) ...@@ -292,7 +292,7 @@ ffz (unsigned long x)
{ {
unsigned long result; unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1))); result = ia64_popcnt(x & (~x - 1));
return result; return result;
} }
...@@ -307,7 +307,7 @@ __ffs (unsigned long x) ...@@ -307,7 +307,7 @@ __ffs (unsigned long x)
{ {
unsigned long result; unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x)); result = ia64_popcnt((x-1) & ~x);
return result; return result;
} }
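Both rewrites lean on the same arithmetic identity: mask off everything at and above the first interesting bit, then count the ones below it. A worked example:

/* __ffs(x) = popcnt((x - 1) & ~x): for x = 0b101000,
 *   x - 1      = 0b100111
 *   ~x         = ...010111
 *   (x-1) & ~x = 0b000111  ->  popcnt = 3 = index of lowest set bit.
 * ffz(x) = popcnt(x & (~x - 1)) is the dual: it counts the run of
 * ones below the lowest clear bit. */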
...@@ -323,7 +323,7 @@ ia64_fls (unsigned long x) ...@@ -323,7 +323,7 @@ ia64_fls (unsigned long x)
long double d = x; long double d = x;
long exp; long exp;
__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d)); exp = ia64_getf_exp(d);
return exp - 0xffff; return exp - 0xffff;
} }
...@@ -349,7 +349,7 @@ static __inline__ unsigned long ...@@ -349,7 +349,7 @@ static __inline__ unsigned long
hweight64 (unsigned long x) hweight64 (unsigned long x)
{ {
unsigned long result; unsigned long result;
__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x)); result = ia64_popcnt(x);
return result; return result;
} }
......
...@@ -7,13 +7,14 @@ ...@@ -7,13 +7,14 @@
*/ */
#include <asm/types.h> #include <asm/types.h>
#include <asm/intrinsics.h>
static __inline__ __const__ __u64 static __inline__ __const__ __u64
__ia64_swab64 (__u64 x) __ia64_swab64 (__u64 x)
{ {
__u64 result; __u64 result;
__asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x)); result = ia64_mux1(x, ia64_mux1_rev);
return result; return result;
} }
......
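The @rev completer of mux1 reverses the byte order of a general register, so the whole 64-bit swab collapses to one instruction; for example:

/* ia64_mux1(0x0102030405060708UL, ia64_mux1_rev)
 *	== 0x0807060504030201UL
 * bytes are reversed while the bits inside each byte are untouched */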
...@@ -6,8 +6,12 @@ ...@@ -6,8 +6,12 @@
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
/* In kernel mode, thread pointer (r13) is used to point to the #include <asm/intrinsics.h>
current task structure. */
register struct task_struct *current asm ("r13"); /*
* In kernel mode, thread pointer (r13) is used to point to the current task
* structure.
*/
#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
#endif /* _ASM_IA64_CURRENT_H */ #endif /* _ASM_IA64_CURRENT_H */
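Since current is now a macro over ia64_getreg(_IA64_REG_TP), every use compiles to a plain read of r13; a usage sketch:

struct task_struct *me = current;	/* one "mov =r13", no call, no load */
if (me->pid == 1)
	/* ... */;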
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* Delay routines using a pre-computed "cycles/usec" value. * Delay routines using a pre-computed "cycles/usec" value.
* *
* Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
...@@ -17,12 +17,14 @@ ...@@ -17,12 +17,14 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/intrinsics.h>
#include <asm/processor.h> #include <asm/processor.h>
static __inline__ void static __inline__ void
ia64_set_itm (unsigned long val) ia64_set_itm (unsigned long val)
{ {
__asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_CR_ITM, val);
ia64_srlz_d();
} }
static __inline__ unsigned long static __inline__ unsigned long
...@@ -30,20 +32,23 @@ ia64_get_itm (void) ...@@ -30,20 +32,23 @@ ia64_get_itm (void)
{ {
unsigned long result; unsigned long result;
__asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory"); result = ia64_getreg(_IA64_REG_CR_ITM);
ia64_srlz_d();
return result; return result;
} }
static __inline__ void static __inline__ void
ia64_set_itv (unsigned long val) ia64_set_itv (unsigned long val)
{ {
__asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_CR_ITV, val);
ia64_srlz_d();
} }
static __inline__ void static __inline__ void
ia64_set_itc (unsigned long val) ia64_set_itc (unsigned long val)
{ {
__asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_AR_ITC, val);
ia64_srlz_d();
} }
static __inline__ unsigned long static __inline__ unsigned long
...@@ -51,10 +56,13 @@ ia64_get_itc (void) ...@@ -51,10 +56,13 @@ ia64_get_itc (void)
{ {
unsigned long result; unsigned long result;
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
#ifdef CONFIG_ITANIUM #ifdef CONFIG_ITANIUM
while (unlikely((__s32) result == -1)) while (unlikely((__s32) result == -1)) {
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
}
#endif #endif
return result; return result;
} }
...@@ -62,15 +70,11 @@ ia64_get_itc (void) ...@@ -62,15 +70,11 @@ ia64_get_itc (void)
static __inline__ void static __inline__ void
__delay (unsigned long loops) __delay (unsigned long loops)
{ {
unsigned long saved_ar_lc;
if (loops < 1) if (loops < 1)
return; return;
__asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc)); while (loops--)
__asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1)); ia64_nop(0);
__asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
__asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
} }
static __inline__ void static __inline__ void
......
#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*
*/
/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier() asm volatile ("":::"memory")
#define ia64_stop() asm volatile (";;"::)
#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
#define ia64_setreg(regnum, val) \
({ \
switch (regnum) { \
case _IA64_REG_PSR_L: \
asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
break; \
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
asm volatile ("mov ar%0=%1" :: \
"i" (regnum - _IA64_REG_AR_KR0), \
"r"(val): "memory"); \
break; \
case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
asm volatile ("mov cr%0=%1" :: \
"i" (regnum - _IA64_REG_CR_DCR), \
"r"(val): "memory" ); \
break; \
case _IA64_REG_SP: \
asm volatile ("mov r12=%0" :: \
"r"(val): "memory"); \
break; \
case _IA64_REG_GP: \
asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
break; \
default: \
ia64_bad_param_for_setreg(); \
break; \
} \
})
#define ia64_getreg(regnum) \
({ \
__u64 ia64_intri_res; \
\
switch (regnum) { \
case _IA64_REG_GP: \
asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_IP: \
asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_PSR: \
asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_TP: /* for current() */ \
{ \
register __u64 ia64_r13 asm ("r13"); \
ia64_intri_res = ia64_r13; \
} \
break; \
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
: "i"(regnum - _IA64_REG_AR_KR0)); \
break; \
case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
: "i" (regnum - _IA64_REG_CR_DCR)); \
break; \
case _IA64_REG_SP: \
asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
break; \
default: \
ia64_bad_param_for_getreg(); \
break; \
} \
ia64_intri_res; \
})
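Callers pass a compile-time constant, so the switch folds to a single mov; the sn_set_tpr() and sn_cpu_init() hunks earlier in this diff are typical uses:

ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR); /* one "mov cr.tpr=" */
cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff);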
#define ia64_hint_pause 0
#define ia64_hint(mode) \
({ \
switch (mode) { \
case ia64_hint_pause: \
asm volatile ("hint @pause" ::: "memory"); \
break; \
} \
})
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11
#define ia64_mux1(x, mode) \
({ \
__u64 ia64_intri_res; \
\
switch (mode) { \
case ia64_mux1_brcst: \
asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_mix: \
asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_shuf: \
asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_alt: \
asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_rev: \
asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
} \
ia64_intri_res; \
})
#define ia64_popcnt(x) \
({ \
__u64 ia64_intri_res; \
asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
\
ia64_intri_res; \
})
#define ia64_getf_exp(x) \
({ \
long ia64_intri_res; \
\
asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
\
ia64_intri_res; \
})
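Reading the exponent field of a floating-point register gives an integer log2 almost for free, which is what the ia64_fls() and get_order() hunks in this diff exploit; e.g.:

/* 1000.0 normalizes to 1.xxx * 2^9, so with the 0xffff bias:
 *   ia64_getf_exp((long double) 1000.0) == 0xffff + 9
 * and ia64_fls(1000) == 9, since 2^9 <= 1000 < 2^10. */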
#define ia64_shrp(a, b, count) \
({ \
__u64 ia64_intri_res; \
asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
ia64_intri_res; \
})
#define ia64_ldfs(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldfd(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldfe(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldf8(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldf_fill(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_stfs(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stfd(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stfe(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stf8(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stf_spill(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_fetchadd4_acq(p, inc) \
({ \
\
__u64 ia64_intri_res; \
asm volatile ("fetchadd4.acq %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd4_rel(p, inc) \
({ \
__u64 ia64_intri_res; \
asm volatile ("fetchadd4.rel %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd8_acq(p, inc) \
({ \
\
__u64 ia64_intri_res; \
asm volatile ("fetchadd8.acq %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd8_rel(p, inc) \
({ \
__u64 ia64_intri_res; \
asm volatile ("fetchadd8.rel %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_xchg1(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg2(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg4(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg8(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
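Each variant loads ar.ccv with the expected old value and then issues one cmpxchg, returning the value actually found; a typical lock-free update built on top (a sketch; MY_FLAG and flags are hypothetical):

unsigned int old, new;

do {
	old = *flags;			/* value we expect to find  */
	new = old | MY_FLAG;		/* value we want to install */
	/* note the argument order: (ptr, new, expected-old)      */
} while (ia64_cmpxchg4_acq(flags, new, old) != old);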
#define ia64_mf() asm volatile ("mf" ::: "memory")
#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
#define ia64_invala() asm volatile ("invala" ::: "memory")
#define ia64_thash(addr) \
({ \
__u64 ia64_intri_res; \
asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
ia64_intri_res; \
})
#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory")
#define ia64_nop(x) asm volatile ("nop %0"::"i"(x))
#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
:: "r"(trnum), "r"(addr) : "memory")
#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
:: "r"(trnum), "r"(addr) : "memory")
#define ia64_tpa(addr) \
({ \
__u64 ia64_pa; \
asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
ia64_pa; \
})
#define __ia64_set_dbr(index, val) \
asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_ibr(index, val) \
asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_pkr(index, val) \
asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_pmc(index, val) \
asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_pmd(index, val) \
asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_rr(index, val) \
asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
#define ia64_get_cpuid(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
ia64_intri_res; \
})
#define __ia64_get_dbr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_ibr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_pkr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_pmc(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_pmd(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_rr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
ia64_intri_res; \
})
#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
#define ia64_ptcga(addr, size) \
asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptcl(addr, size) \
asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptri(addr, size) \
asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptrd(addr, size) \
asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
#define ia64_lfetch(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
break; \
} \
})
#define ia64_lfetch_excl(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
break; \
} \
})
#define ia64_lfetch_fault(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
break; \
} \
})
#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
break; \
} \
})
#define ia64_intrin_local_irq_restore(x) \
do { \
asm volatile (" cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"(p6) srlz.d" \
:: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
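The restore sequence tests the saved copy of psr.i and re-enables or re-disables interrupts to match; the usual pairing looks roughly like this (a sketch of the save side, not the exact system.h code; IA64_PSR_I comes from kregs.h):

unsigned long psr = ia64_getreg(_IA64_REG_PSR);    /* save psr          */
ia64_stop();
ia64_rsm(IA64_PSR_I);                              /* interrupts off    */
/* ... critical section ... */
ia64_intrin_local_irq_restore(psr & IA64_PSR_I);   /* old state back in */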
#endif /* _ASM_IA64_GCC_INTRIN_H */
/*
* Copyright (C) 2002,2003 Intel Corp.
* Jun Nakajima <jun.nakajima@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
*/
#ifndef _ASM_IA64_IA64REGS_H
#define _ASM_IA64_IA64REGS_H
/*
* Register Names for getreg() and setreg().
*
* The "magic" numbers happen to match the values used by the Intel compiler's
* getreg()/setreg() intrinsics.
*/
/* Special Registers */
#define _IA64_REG_IP 1016 /* getreg only */
#define _IA64_REG_PSR 1019
#define _IA64_REG_PSR_L 1019
/* General Integer Registers */
#define _IA64_REG_GP 1025 /* R1 */
#define _IA64_REG_R8 1032 /* R8 */
#define _IA64_REG_R9 1033 /* R9 */
#define _IA64_REG_SP 1036 /* R12 */
#define _IA64_REG_TP 1037 /* R13 */
/* Application Registers */
#define _IA64_REG_AR_KR0 3072
#define _IA64_REG_AR_KR1 3073
#define _IA64_REG_AR_KR2 3074
#define _IA64_REG_AR_KR3 3075
#define _IA64_REG_AR_KR4 3076
#define _IA64_REG_AR_KR5 3077
#define _IA64_REG_AR_KR6 3078
#define _IA64_REG_AR_KR7 3079
#define _IA64_REG_AR_RSC 3088
#define _IA64_REG_AR_BSP 3089
#define _IA64_REG_AR_BSPSTORE 3090
#define _IA64_REG_AR_RNAT 3091
#define _IA64_REG_AR_FCR 3093
#define _IA64_REG_AR_EFLAG 3096
#define _IA64_REG_AR_CSD 3097
#define _IA64_REG_AR_SSD 3098
#define _IA64_REG_AR_CFLAG 3099
#define _IA64_REG_AR_FSR 3100
#define _IA64_REG_AR_FIR 3101
#define _IA64_REG_AR_FDR 3102
#define _IA64_REG_AR_CCV 3104
#define _IA64_REG_AR_UNAT 3108
#define _IA64_REG_AR_FPSR 3112
#define _IA64_REG_AR_ITC 3116
#define _IA64_REG_AR_PFS 3136
#define _IA64_REG_AR_LC 3137
#define _IA64_REG_AR_EC 3138
/* Control Registers */
#define _IA64_REG_CR_DCR 4096
#define _IA64_REG_CR_ITM 4097
#define _IA64_REG_CR_IVA 4098
#define _IA64_REG_CR_PTA 4104
#define _IA64_REG_CR_IPSR 4112
#define _IA64_REG_CR_ISR 4113
#define _IA64_REG_CR_IIP 4115
#define _IA64_REG_CR_IFA 4116
#define _IA64_REG_CR_ITIR 4117
#define _IA64_REG_CR_IIPA 4118
#define _IA64_REG_CR_IFS 4119
#define _IA64_REG_CR_IIM 4120
#define _IA64_REG_CR_IHA 4121
#define _IA64_REG_CR_LID 4160
#define _IA64_REG_CR_IVR 4161 /* getreg only */
#define _IA64_REG_CR_TPR 4162
#define _IA64_REG_CR_EOI 4163
#define _IA64_REG_CR_IRR0 4164 /* getreg only */
#define _IA64_REG_CR_IRR1 4165 /* getreg only */
#define _IA64_REG_CR_IRR2 4166 /* getreg only */
#define _IA64_REG_CR_IRR3 4167 /* getreg only */
#define _IA64_REG_CR_ITV 4168
#define _IA64_REG_CR_PMV 4169
#define _IA64_REG_CR_CMCV 4170
#define _IA64_REG_CR_LRR0 4176
#define _IA64_REG_CR_LRR1 4177
/* Indirect Registers for getindreg() and setindreg() */
#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
#define _IA64_REG_INDR_DBR 9001
#define _IA64_REG_INDR_IBR 9002
#define _IA64_REG_INDR_PKR 9003
#define _IA64_REG_INDR_PMC 9004
#define _IA64_REG_INDR_PMD 9005
#define _IA64_REG_INDR_RR 9006
#endif /* _ASM_IA64_IA64REGS_H */
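The encodings are arithmetic on purpose: the case ranges in gcc_intrin.h compute the hardware register number by subtraction. For example:

/* _IA64_REG_CR_TPR - _IA64_REG_CR_DCR == 4162 - 4096 == 66,
 * so ia64_setreg(_IA64_REG_CR_TPR, v) emits "mov cr66=v" (cr.tpr);
 * _IA64_REG_AR_ITC - _IA64_REG_AR_KR0 == 3116 - 3072 == 44,
 * so ia64_getreg(_IA64_REG_AR_ITC) reads ar44 (ar.itc). */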
...@@ -8,8 +8,17 @@ ...@@ -8,8 +8,17 @@
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#ifndef __ASSEMBLY__
#include <linux/config.h> #include <linux/config.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif
/* /*
* Force an unresolved reference if someone tries to use * Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value. * ia64_fetch_and_add() with a bad value.
...@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \ ({ \
switch (sz) { \ switch (sz) { \
case 4: \ case 4: \
__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \ tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \ break; \
\ \
case 8: \ case 8: \
__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \ tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \ break; \
\ \
default: \ default: \
...@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
(__typeof__(*(v))) (_tmp); /* return old value */ \ (__typeof__(*(v))) (_tmp); /* return old value */ \
}) })
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */ #define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
/* /*
* This function doesn't exist, so you'll get a linker error if * This function doesn't exist, so you'll get a linker error if
* something tries to do an invalid xchg(). * something tries to do an invalid xchg().
*/ */
extern void __xchg_called_with_bad_pointer (void); extern void ia64_xchg_called_with_bad_pointer (void);
static __inline__ unsigned long #define __xchg(x,ptr,size) \
__xchg (unsigned long x, volatile void *ptr, int size) ({ \
{ unsigned long __xchg_result; \
unsigned long result; \
switch (size) { \
switch (size) { case 1: \
case 1: __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result) break; \
: "r" (ptr), "r" (x) : "memory"); \
return result; case 2: \
__xchg_result = ia64_xchg2((__u16 *)ptr, x); \
case 2: break; \
__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result) \
: "r" (ptr), "r" (x) : "memory"); case 4: \
return result; __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
break; \
case 4: \
__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result) case 8: \
: "r" (ptr), "r" (x) : "memory"); __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
return result; break; \
default: \
case 8: ia64_xchg_called_with_bad_pointer(); \
__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result) } \
: "r" (ptr), "r" (x) : "memory"); __xchg_result; \
return result; })
}
__xchg_called_with_bad_pointer();
return x;
}
#define xchg(ptr,x) \ #define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr)))) ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
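xchg() derives the access width from the pointed-to type and hands back the previous contents atomically; a usage sketch (slot, new_msg and free_msg are hypothetical):

struct msg *prev = xchg(&slot, new_msg);   /* store, return old value */
if (prev)
	free_msg(prev);                    /* nobody else can see it  */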
...@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size) ...@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size)
* This function doesn't exist, so you'll get a linker error * This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). * if something tries to do an invalid cmpxchg().
*/ */
extern long __cmpxchg_called_with_bad_pointer(void); extern long ia64_cmpxchg_called_with_bad_pointer (void);
#define ia64_cmpxchg(sem,ptr,old,new,size) \ #define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \ ({ \
__typeof__(ptr) _p_ = (ptr); \
__typeof__(new) _n_ = (new); \
__u64 _o_, _r_; \ __u64 _o_, _r_; \
\ \
switch (size) { \ switch (size) { \
...@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void); ...@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void);
case 8: _o_ = (__u64) (long) (old); break; \ case 8: _o_ = (__u64) (long) (old); break; \
default: break; \ default: break; \
} \ } \
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
case 2: \ case 2: \
__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
case 4: \ case 4: \
__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
case 8: \ case 8: \
__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
: "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
break; \ break; \
\ \
default: \ default: \
_r_ = __cmpxchg_called_with_bad_pointer(); \ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \ break; \
} \ } \
(__typeof__(old)) _r_; \ (__typeof__(old)) _r_; \
}) })
#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr))) #define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr))) #define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */ /* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n) #define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
...@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void); ...@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
if (_cmpxchg_bugcheck_count-- <= 0) { \ if (_cmpxchg_bugcheck_count-- <= 0) { \
void *ip; \ void *ip; \
extern int printk(const char *fmt, ...); \ extern int printk(const char *fmt, ...); \
asm ("mov %0=ip" : "=r"(ip)); \ ip = ia64_getreg(_IA64_REG_IP); \
printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
break; \ break; \
} \ } \
...@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void); ...@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void);
# define CMPXCHG_BUGCHECK(v) # define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
#endif
#endif /* _ASM_IA64_INTRINSICS_H */ #endif /* _ASM_IA64_INTRINSICS_H */
...@@ -52,6 +52,7 @@ extern unsigned int num_io_spaces; ...@@ -52,6 +52,7 @@ extern unsigned int num_io_spaces;
# ifdef __KERNEL__ # ifdef __KERNEL__
#include <asm/intrinsics.h>
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/system.h> #include <asm/system.h>
...@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address) ...@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address)
* Memory fence w/accept. This should never be used in code that is * Memory fence w/accept. This should never be used in code that is
* not IA-64 specific. * not IA-64 specific.
*/ */
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") #define __ia64_mf_a() ia64_mfa()
static inline const unsigned long static inline const unsigned long
__ia64_get_io_port_base (void) __ia64_get_io_port_base (void)
......
...@@ -155,7 +155,7 @@ struct ia64_machine_vector { ...@@ -155,7 +155,7 @@ struct ia64_machine_vector {
ia64_mv_readw_t *readw; ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl; ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq; ia64_mv_readq_t *readq;
}; } __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \ #define MACHVEC_INIT(name) \
{ \ { \
......
...@@ -158,9 +158,7 @@ reload_context (mm_context_t context) ...@@ -158,9 +158,7 @@ reload_context (mm_context_t context)
ia64_set_rr(0x4000000000000000, rr2); ia64_set_rr(0x4000000000000000, rr2);
ia64_set_rr(0x6000000000000000, rr3); ia64_set_rr(0x6000000000000000, rr3);
ia64_set_rr(0x8000000000000000, rr4); ia64_set_rr(0x8000000000000000, rr4);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */ ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
} }
static inline void static inline void
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <asm/intrinsics.h>
#include <asm/types.h> #include <asm/types.h>
/* /*
...@@ -143,7 +144,7 @@ get_order (unsigned long size) ...@@ -143,7 +144,7 @@ get_order (unsigned long size)
double d = size - 1; double d = size - 1;
long order; long order;
__asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d)); order = ia64_getf_exp(d);
order = order - PAGE_SHIFT - 0xffff + 1; order = order - PAGE_SHIFT - 0xffff + 1;
if (order < 0) if (order < 0)
order = 0; order = 0;
......
...@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector ...@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector
/* Initialize the processor controlled caches */ /* Initialize the processor controlled caches */
static inline s64 static inline s64
ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict) ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
{ {
struct ia64_pal_retval iprv; struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict); PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
return iprv.status; return iprv.status;
} }
......
...@@ -72,10 +72,11 @@ typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type ...@@ -72,10 +72,11 @@ typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type
typedef struct { typedef struct {
pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */ pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
unsigned long ctx_flags; /* noblock/block */ unsigned long ctx_flags; /* noblock/block */
unsigned int ctx_nextra_sets; /* number of extra event sets (you always get 1) */ unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
unsigned short ctx_reserved1; /* for future use */
int ctx_fd; /* return arg: unique identification for context */ int ctx_fd; /* return arg: unique identification for context */
void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */ void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */
unsigned long ctx_reserved[11]; /* for future use */ unsigned long ctx_reserved2[11];/* for future use */
} pfarg_context_t; } pfarg_context_t;
/* /*
...@@ -83,7 +84,8 @@ typedef struct { ...@@ -83,7 +84,8 @@ typedef struct {
*/ */
typedef struct { typedef struct {
unsigned int reg_num; /* which register */ unsigned int reg_num; /* which register */
unsigned int reg_set; /* event set for this register */ unsigned short reg_set; /* event set for this register */
unsigned short reg_reserved1; /* for future use */
unsigned long reg_value; /* initial pmc/pmd value */ unsigned long reg_value; /* initial pmc/pmd value */
unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */ unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
...@@ -99,15 +101,16 @@ typedef struct { ...@@ -99,15 +101,16 @@ typedef struct {
unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */ unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
unsigned long reg_smpl_eventid; /* opaque sampling event identifier */ unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
unsigned long reserved[3]; /* for future use */ unsigned long reg_reserved2[3]; /* for future use */
} pfarg_reg_t; } pfarg_reg_t;
typedef struct { typedef struct {
unsigned int dbreg_num; /* which debug register */ unsigned int dbreg_num; /* which debug register */
unsigned int dbreg_set; /* event set for this register */ unsigned short dbreg_set; /* event set for this register */
unsigned short dbreg_reserved1; /* for future use */
unsigned long dbreg_value; /* value for debug register */ unsigned long dbreg_value; /* value for debug register */
unsigned long dbreg_flags; /* return: dbreg error */ unsigned long dbreg_flags; /* return: dbreg error */
unsigned long dbreg_reserved[1]; /* for future use */ unsigned long dbreg_reserved2[1]; /* for future use */
} pfarg_dbreg_t; } pfarg_dbreg_t;
typedef struct { typedef struct {
...@@ -118,16 +121,19 @@ typedef struct { ...@@ -118,16 +121,19 @@ typedef struct {
typedef struct { typedef struct {
pid_t load_pid; /* process to load the context into */ pid_t load_pid; /* process to load the context into */
unsigned int load_set; /* first event set to load */ unsigned short load_set; /* first event set to load */
unsigned long load_reserved[2]; /* for future use */ unsigned short load_reserved1; /* for future use */
unsigned long load_reserved2[3]; /* for future use */
} pfarg_load_t; } pfarg_load_t;
typedef struct { typedef struct {
int msg_type; /* generic message header */ int msg_type; /* generic message header */
int msg_ctx_fd; /* generic message header */ int msg_ctx_fd; /* generic message header */
unsigned long msg_tstamp; /* for perf tuning */
unsigned int msg_active_set; /* active set at the time of overflow */
unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */ unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
unsigned short msg_active_set; /* active set at the time of overflow */
unsigned short msg_reserved1; /* for future use */
unsigned int msg_reserved2; /* for future use */
unsigned long msg_tstamp; /* for perf tuning/debug */
} pfm_ovfl_msg_t; } pfm_ovfl_msg_t;
typedef struct { typedef struct {
...@@ -192,25 +198,28 @@ extern void pfm_handle_work(void); ...@@ -192,25 +198,28 @@ extern void pfm_handle_work(void);
#define PFM_PMD_LONG_RESET 1 #define PFM_PMD_LONG_RESET 1
#define PFM_PMD_SHORT_RESET 2 #define PFM_PMD_SHORT_RESET 2
typedef struct { typedef union {
unsigned int val;
struct {
unsigned int notify_user:1; /* notify user program of overflow */ unsigned int notify_user:1; /* notify user program of overflow */
unsigned int reset_pmds :2; /* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */ unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
unsigned int block:1; /* block monitored task on kernel exit */ unsigned int block_task:1; /* block monitored task on kernel exit */
unsigned int stop_monitoring:1; /* will mask monitoring via PMCx.plm */ unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
unsigned int reserved:26; /* for future use */ unsigned int reserved:28; /* for future use */
} bits;
} pfm_ovfl_ctrl_t; } pfm_ovfl_ctrl_t;
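With the union layout, a buffer-format handler can zero the whole control word and then set individual bits for perfmon to act on at return; a sketch, given the pfm_ovfl_arg_t *arg that fmt_handler receives:

arg->ovfl_ctrl.val = 0;                    /* clear all controls       */
arg->ovfl_ctrl.bits.notify_user = 1;       /* wake monitoring process  */
arg->ovfl_ctrl.bits.mask_monitoring = 1;   /* mask PMCs until restart  */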
typedef struct { typedef struct {
unsigned long ovfl_pmds[4]; /* bitmask of overflowed pmds */ unsigned char ovfl_pmd; /* index of overflowed PMD */
unsigned long ovfl_notify[4]; /* bitmask of overflow pmds which asked for notification */ unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
unsigned long pmd_value; /* current 64-bit value of 1st pmd which overflowed */ unsigned short active_set; /* event set active at the time of the overflow */
unsigned long pmd_last_reset; /* last reset value of 1st pmd which overflowed */
unsigned long pmd_eventid; /* eventid associated with 1st pmd which overflowed */
unsigned int active_set; /* event set active at the time of the overflow */
unsigned int reserved1;
unsigned long smpl_pmds[4];
unsigned long smpl_pmds_values[PMU_MAX_PMDS];
pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */ pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
unsigned long pmd_last_reset; /* last reset value of the PMD */
unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
unsigned long pmd_value; /* current 64-bit value of the PMD */
unsigned long pmd_eventid; /* eventid associated with PMD */
} pfm_ovfl_arg_t; } pfm_ovfl_arg_t;
...@@ -223,7 +232,7 @@ typedef struct _pfm_buffer_fmt_t { ...@@ -223,7 +232,7 @@ typedef struct _pfm_buffer_fmt_t {
int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg); int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size); int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg); int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs); int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs); int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
......
...@@ -16,7 +16,9 @@ ...@@ -16,7 +16,9 @@
*/ */
typedef struct { typedef struct {
unsigned long buf_size; /* size of the buffer in bytes */ unsigned long buf_size; /* size of the buffer in bytes */
unsigned long reserved[3]; /* for future use */ unsigned int flags; /* buffer specific flags */
unsigned int res1; /* for future use */
unsigned long reserved[2]; /* for future use */
} pfm_default_smpl_arg_t; } pfm_default_smpl_arg_t;
/* /*
...@@ -46,28 +48,27 @@ typedef struct { ...@@ -46,28 +48,27 @@ typedef struct {
/* /*
* Entry header in the sampling buffer. The header is directly followed * Entry header in the sampling buffer. The header is directly followed
* with the PMDs saved in increasing index order: PMD4, PMD5, .... How * with the values of the PMD registers of interest saved in increasing
* many PMDs are present depends on how the session was programmed. * index order: PMD4, PMD5, and so on. How many PMDs are present depends
* on how the session was programmed.
* *
* XXX: in this version of the entry, only up to 64 registers can be * In the case where multiple counters overflow at the same time, multiple
* recorded. This should be enough for quite some time. Always check * entries are written consecutively.
* sampling format before parsing entries!
* *
* In the case where multiple counters overflow at the same time, the * last_reset_value member indicates the initial value of the overflowed PMD.
* last_reset_value member indicates the initial value of the
* overflowed PMD with the smallest index. For instance, if PMD2 and
* PMD5 have overflowed, the last_reset_value member contains the
* initial value of PMD2.
*/ */
typedef struct { typedef struct {
int pid; /* current process at PMU interrupt point */ int pid; /* active process at PMU interrupt point */
int cpu; /* cpu on which the overflow occurred */ unsigned char reserved1[3]; /* reserved for future use */
unsigned long last_reset_val; /* initial value of 1st overflowed PMD */ unsigned char ovfl_pmd; /* index of overflowed PMD */
unsigned long last_reset_val; /* initial value of overflowed PMD */
unsigned long ip; /* where the overflow interrupt happened */ unsigned long ip; /* where the overflow interrupt happened */
unsigned long ovfl_pmds; /* which PMD registers overflowed (64 max) */ unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
unsigned long tstamp; /* ar.itc on the CPU that took the overflow */
unsigned int set; /* event set active when overflow occurred */ unsigned short cpu; /* cpu on which the overflow occurred */
unsigned int reserved1; /* for future use */ unsigned short set; /* event set active when overflow occurred */
unsigned int reserved2; /* for future use */
} pfm_default_smpl_entry_t; } pfm_default_smpl_entry_t;
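Each record is this fixed header immediately followed by the sampled PMD values in increasing index order, so a user-level reader walks the buffer roughly as follows (a sketch; buf_first_entry, nentries and npmds are hypothetical):

pfm_default_smpl_entry_t *ent = buf_first_entry(buf);
unsigned long *pmds;

while (nentries--) {
	pmds = (unsigned long *)(ent + 1);  /* values follow the header */
	printf("pid=%d pmd%u ip=%#lx val=%#lx\n",
	       ent->pid, ent->ovfl_pmd, ent->ip, pmds[0]);
	ent = (void *)(pmds + npmds);       /* skip the recorded PMDs   */
}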
#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long)) */ #define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long)) */
......
...@@ -15,8 +15,9 @@ ...@@ -15,8 +15,9 @@
#include <linux/config.h> #include <linux/config.h>
#include <asm/ptrace.h> #include <asm/intrinsics.h>
#include <asm/kregs.h> #include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h> #include <asm/ustack.h>
#define IA64_NUM_DBG_REGS 8 #define IA64_NUM_DBG_REGS 8
...@@ -356,38 +357,41 @@ extern unsigned long get_wchan (struct task_struct *p); ...@@ -356,38 +357,41 @@ extern unsigned long get_wchan (struct task_struct *p);
/* Return stack pointer of blocked task TSK. */ /* Return stack pointer of blocked task TSK. */
#define KSTK_ESP(tsk) ((tsk)->thread.ksp) #define KSTK_ESP(tsk) ((tsk)->thread.ksp)
static inline unsigned long extern void ia64_getreg_unknown_kr (void);
ia64_get_kr (unsigned long regnum) extern void ia64_setreg_unknown_kr (void);
{
unsigned long r = 0;
switch (regnum) {
case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break;
case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break;
case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break;
case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break;
case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break;
case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break;
case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break;
}
return r;
}
static inline void #define ia64_get_kr(regnum) \
ia64_set_kr (unsigned long regnum, unsigned long r) ({ \
{ unsigned long r = 0; \
switch (regnum) { \
case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break; switch (regnum) { \
case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break; case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break; case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break; case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break; case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break; case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break; case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break; case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
} case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
} default: ia64_getreg_unknown_kr(); break; \
} \
r; \
})
#define ia64_set_kr(regnum, r) \
({ \
switch (regnum) { \
case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
default: ia64_setreg_unknown_kr(); break; \
} \
})
/* /*
* The following three macros can't be inline functions because we don't have struct * The following three macros can't be inline functions because we don't have struct
...@@ -423,8 +427,8 @@ extern void ia32_save_state (struct task_struct *task); ...@@ -423,8 +427,8 @@ extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task); extern void ia32_load_state (struct task_struct *task);
#endif #endif
#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory"); #define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory"); #define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
/* load fp 0.0 into fph */ /* load fp 0.0 into fph */
static inline void static inline void
...@@ -450,78 +454,14 @@ ia64_load_fpu (struct ia64_fpreg *fph) { ...@@ -450,78 +454,14 @@ ia64_load_fpu (struct ia64_fpreg *fph) {
ia64_fph_disable(); ia64_fph_disable();
} }
static inline void
ia64_fc (void *addr)
{
asm volatile ("fc %0" :: "r"(addr) : "memory");
}
static inline void
ia64_sync_i (void)
{
asm volatile (";; sync.i" ::: "memory");
}
static inline void
ia64_srlz_i (void)
{
asm volatile (";; srlz.i ;;" ::: "memory");
}
static inline void
ia64_srlz_d (void)
{
asm volatile (";; srlz.d" ::: "memory");
}
static inline __u64
ia64_get_rr (__u64 reg_bits)
{
__u64 r;
asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
return r;
}
static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}
static inline __u64
ia64_get_dcr (void)
{
__u64 r;
asm volatile ("mov %0=cr.dcr" : "=r"(r));
return r;
}
static inline void
ia64_set_dcr (__u64 val)
{
asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
}
static inline __u64
ia64_get_lid (void)
{
__u64 r;
asm volatile ("mov %0=cr.lid" : "=r"(r));
return r;
}
static inline void
ia64_invala (void)
{
asm volatile ("invala" ::: "memory");
}
static inline __u64 static inline __u64
ia64_clear_ic (void) ia64_clear_ic (void)
{ {
__u64 psr; __u64 psr;
asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory"); psr = ia64_getreg(_IA64_REG_PSR);
ia64_stop();
ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
ia64_srlz_i();
return psr; return psr;
} }
...@@ -531,7 +471,9 @@ ia64_clear_ic (void) ...@@ -531,7 +471,9 @@ ia64_clear_ic (void)
static inline void static inline void
ia64_set_psr (__u64 psr) ia64_set_psr (__u64 psr)
{ {
asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory"); ia64_stop();
ia64_setreg(_IA64_REG_PSR_L, psr);
ia64_srlz_d();
} }
/* /*
...@@ -543,14 +485,13 @@ ia64_itr (__u64 target_mask, __u64 tr_num, ...@@ -543,14 +485,13 @@ ia64_itr (__u64 target_mask, __u64 tr_num,
__u64 vmaddr, __u64 pte, __u64 vmaddr, __u64 pte,
__u64 log_page_size) __u64 log_page_size)
{ {
asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory"); ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory"); ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
ia64_stop();
if (target_mask & 0x1) if (target_mask & 0x1)
asm volatile ("itr.i itr[%0]=%1" ia64_itri(tr_num, pte);
:: "r"(tr_num), "r"(pte) : "memory");
if (target_mask & 0x2) if (target_mask & 0x2)
asm volatile (";;itr.d dtr[%0]=%1" ia64_itrd(tr_num, pte);
:: "r"(tr_num), "r"(pte) : "memory");
} }
/* /*
...@@ -561,13 +502,14 @@ static inline void ...@@ -561,13 +502,14 @@ static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
__u64 log_page_size) __u64 log_page_size)
{ {
asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory"); ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory"); ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
ia64_stop();
/* as per EAS2.6, itc must be the last instruction in an instruction group */ /* as per EAS2.6, itc must be the last instruction in an instruction group */
if (target_mask & 0x1) if (target_mask & 0x1)
asm volatile ("itc.i %0;;" :: "r"(pte) : "memory"); ia64_itci(pte);
if (target_mask & 0x2) if (target_mask & 0x2)
asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory"); ia64_itcd(pte);
} }
/* /*
...@@ -578,16 +520,17 @@ static inline void ...@@ -578,16 +520,17 @@ static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size) ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{ {
if (target_mask & 0x1) if (target_mask & 0x1)
asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2)); ia64_ptri(vmaddr, (log_size << 2));
if (target_mask & 0x2) if (target_mask & 0x2)
asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2)); ia64_ptrd(vmaddr, (log_size << 2));
} }
/* Set the interrupt vector address. The address must be suitably aligned (32KB). */ /* Set the interrupt vector address. The address must be suitably aligned (32KB). */
static inline void static inline void
ia64_set_iva (void *ivt_addr) ia64_set_iva (void *ivt_addr)
{ {
asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory"); ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
ia64_srlz_i();
} }
/* Set the page table address and control bits. */ /* Set the page table address and control bits. */
...@@ -595,79 +538,33 @@ static inline void ...@@ -595,79 +538,33 @@ static inline void
ia64_set_pta (__u64 pta) ia64_set_pta (__u64 pta)
{ {
/* Note: srlz.i implies srlz.d */ /* Note: srlz.i implies srlz.d */
asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory"); ia64_setreg(_IA64_REG_CR_PTA, pta);
} ia64_srlz_i();
static inline __u64
ia64_get_cpuid (__u64 regnum)
{
__u64 r;
asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
return r;
} }
static inline void static inline void
ia64_eoi (void) ia64_eoi (void)
{ {
asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory"); ia64_setreg(_IA64_REG_CR_EOI, 0);
ia64_srlz_d();
} }
static inline void #define cpu_relax() ia64_hint(ia64_hint_pause)
ia64_set_lrr0 (unsigned long val)
{
asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
}
static inline void static inline void
ia64_hint_pause (void) ia64_set_lrr0 (unsigned long val)
{ {
asm volatile ("hint @pause" ::: "memory"); ia64_setreg(_IA64_REG_CR_LRR0, val);
ia64_srlz_d();
} }
#define cpu_relax() ia64_hint_pause()
static inline void static inline void
ia64_set_lrr1 (unsigned long val) ia64_set_lrr1 (unsigned long val)
{ {
asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory"); ia64_setreg(_IA64_REG_CR_LRR1, val);
} ia64_srlz_d();
static inline void
ia64_set_pmv (__u64 val)
{
asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
}
static inline __u64
ia64_get_pmc (__u64 regnum)
{
__u64 retval;
asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
return retval;
}
static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}
static inline __u64
ia64_get_pmd (__u64 regnum)
{
__u64 retval;
asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
return retval;
} }
static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
}
/* /*
* Given the address to which a spill occurred, return the unat bit * Given the address to which a spill occurred, return the unat bit
...@@ -713,160 +610,35 @@ thread_saved_pc (struct task_struct *t) ...@@ -713,160 +610,35 @@ thread_saved_pc (struct task_struct *t)
* Get the current instruction/program counter value. * Get the current instruction/program counter value.
*/ */
#define current_text_addr() \ #define current_text_addr() \
({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; }) ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
-/*
- * Set the correctable machine check vector register
- */
-static inline void
-ia64_set_cmcv (__u64 val)
-{
-        asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
-}
-/*
- * Read the correctable machine check vector register
- */
-static inline __u64
-ia64_get_cmcv (void)
-{
-        __u64 val;
-        asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
-        return val;
-}
 static inline __u64
 ia64_get_ivr (void)
 {
         __u64 r;
-        asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
+        ia64_srlz_d();
+        r = ia64_getreg(_IA64_REG_CR_IVR);
+        ia64_srlz_d();
         return r;
 }
-static inline void
-ia64_set_tpr (__u64 val)
-{
-        asm volatile ("mov cr.tpr=%0" :: "r"(val));
-}
-static inline __u64
-ia64_get_tpr (void)
-{
-        __u64 r;
-        asm volatile ("mov %0=cr.tpr" : "=r"(r));
-        return r;
-}
-static inline void
-ia64_set_irr0 (__u64 val)
-{
-        asm volatile("mov cr.irr0=%0;;" :: "r"(val) : "memory");
-        ia64_srlz_d();
-}
-static inline __u64
-ia64_get_irr0 (void)
-{
-        __u64 val;
-        /* this is volatile because irr may change unbeknownst to gcc... */
-        asm volatile("mov %0=cr.irr0" : "=r"(val));
-        return val;
-}
-static inline void
-ia64_set_irr1 (__u64 val)
-{
-        asm volatile("mov cr.irr1=%0;;" :: "r"(val) : "memory");
-        ia64_srlz_d();
-}
-static inline __u64
-ia64_get_irr1 (void)
-{
-        __u64 val;
-        /* this is volatile because irr may change unbeknownst to gcc... */
-        asm volatile("mov %0=cr.irr1" : "=r"(val));
-        return val;
-}
-static inline void
-ia64_set_irr2 (__u64 val)
-{
-        asm volatile("mov cr.irr2=%0;;" :: "r"(val) : "memory");
-        ia64_srlz_d();
-}
-static inline __u64
-ia64_get_irr2 (void)
-{
-        __u64 val;
-        /* this is volatile because irr may change unbeknownst to gcc... */
-        asm volatile("mov %0=cr.irr2" : "=r"(val));
-        return val;
-}
-static inline void
-ia64_set_irr3 (__u64 val)
-{
-        asm volatile("mov cr.irr3=%0;;" :: "r"(val) : "memory");
-        ia64_srlz_d();
-}
-static inline __u64
-ia64_get_irr3 (void)
-{
-        __u64 val;
-        /* this is volatile because irr may change unbeknownst to gcc... */
-        asm volatile ("mov %0=cr.irr3" : "=r"(val));
-        return val;
-}
-static inline __u64
-ia64_get_gp(void)
-{
-        __u64 val;
-        asm ("mov %0=gp" : "=r"(val));
-        return val;
-}
-static inline void
-ia64_set_ibr (__u64 regnum, __u64 value)
-{
-        asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
-}
 static inline void
 ia64_set_dbr (__u64 regnum, __u64 value)
 {
-        asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
+        __ia64_set_dbr(regnum, value);
 #ifdef CONFIG_ITANIUM
-        asm volatile (";; srlz.d");
+        ia64_srlz_d();
 #endif
 }
-static inline __u64
-ia64_get_ibr (__u64 regnum)
-{
-        __u64 retval;
-        asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum));
-        return retval;
-}
 static inline __u64
 ia64_get_dbr (__u64 regnum)
 {
         __u64 retval;
-        asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum));
+        retval = __ia64_get_dbr(regnum);
 #ifdef CONFIG_ITANIUM
-        asm volatile (";; srlz.d");
+        ia64_srlz_d();
 #endif
         return retval;
 }
@@ -875,37 +647,21 @@ ia64_get_dbr (__u64 regnum)
 #ifdef SMART_COMPILER
 # define ia64_rotr(w,n)                                                 \
 ({                                                                      \
-        __u64 _w = (w), _n = (n);                                       \
+        __u64 __ia64_rotr_w = (w), _n = (n);                            \
                                                                         \
-        (_w >> _n) | (_w << (64 - _n));                                 \
+        (__ia64_rotr_w >> _n) | (__ia64_rotr_w << (64 - _n));           \
 })
 #else
 # define ia64_rotr(w,n)                                                 \
 ({                                                                      \
-        __u64 result;                                                   \
-        asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n));       \
-        result;                                                         \
+        __u64 __ia64_rotr_w;                                            \
+        __ia64_rotr_w = ia64_shrp((w), (w), (n));                       \
+        __ia64_rotr_w;                                                  \
 })
 #endif
 #define ia64_rotl(w,n)	ia64_rotr((w),(64)-(n))
-static inline __u64
-ia64_thash (__u64 addr)
-{
-        __u64 result;
-        asm ("thash %0=%1" : "=r"(result) : "r" (addr));
-        return result;
-}
-static inline __u64
-ia64_tpa (__u64 addr)
-{
-        __u64 result;
-        asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
-        return result;
-}
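
The SMART_COMPILER branch of ia64_rotr() above is the classic two-shift rotate; the intrinsic branch gets the same result in a single shrp instruction. A self-contained check of the identity that ia64_rotl() also relies on (rotr64 here is an illustrative stand-in, not a kernel symbol):

#include <stdint.h>
#include <assert.h>

/* Portable analog of the SMART_COMPILER branch: rotate right by ORing
 * the two shifted halves. Assumes 0 < n < 64. */
static inline uint64_t rotr64(uint64_t w, unsigned n)
{
        return (w >> n) | (w << (64 - n));
}

int main(void)
{
        assert(rotr64(0x1ULL, 1) == 0x8000000000000000ULL);
        /* rotl(w,n) == rotr(w, 64-n), mirroring the ia64_rotl() definition */
        assert(rotr64(0xf0ULL, 64 - 8) == 0xf000ULL);
        return 0;
}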
 /*
  * Take a mapped kernel address and return the equivalent address
  * in the region 7 identity mapped virtual area.
@@ -914,7 +670,7 @@ static inline void *
 ia64_imva (void *addr)
 {
         void *result;
-        asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
+        result = (void *) ia64_tpa(addr);
         return __va(result);
 }
@@ -926,13 +682,13 @@ ia64_imva (void *addr)
 static inline void
 prefetch (const void *x)
 {
-        __asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
+        ia64_lfetch(ia64_lfhint_none, x);
 }
 static inline void
 prefetchw (const void *x)
 {
-        __asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
+        ia64_lfetch_excl(ia64_lfhint_none, x);
 }
 #define spin_lock_prefetch(x)	prefetchw(x)
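
prefetch()/prefetchw() are typically issued one element ahead while walking pointer-chasing structures, so the fetch overlaps the work on the current element. A sketch of that usage, with GCC's __builtin_prefetch standing in for the lfetch-based kernel versions:

#include <stdio.h>

struct node {
        struct node *next;
        long payload;
};

static long sum_list(struct node *n)
{
        long sum = 0;
        while (n) {
                if (n->next)
                        __builtin_prefetch(n->next);    /* start the next fetch early */
                sum += n->payload;
                n = n->next;
        }
        return sum;
}

int main(void)
{
        struct node c = { 0, 3 }, b = { &c, 2 }, a = { &b, 1 };
        printf("%ld\n", sum_list(&a));  /* prints 6 */
        return 0;
}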
......
@@ -23,6 +23,8 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <asm/intrinsics.h>
 /*
  * the semaphore definition
  */
@@ -81,9 +83,8 @@ init_rwsem (struct rw_semaphore *sem)
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-        int result;
-        __asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
-                              "=r"(result) : "r"(&sem->count) : "memory");
+        int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
         if (result < 0)
                 rwsem_down_read_failed(sem);
 }
@@ -111,9 +112,8 @@ __down_write (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-        int result;
-        __asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
-                              "=r"(result) : "r"(&sem->count) : "memory");
+        int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
         if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                 rwsem_wake(sem);
 }
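
These fast paths hinge on fetchadd returning the value the count held before the add, with acquire semantics on the down side and release on the up side. A user-space analog, assuming C11 atomics, with the kernel slow paths (rwsem_down_read_failed()/rwsem_wake()) reduced to messages:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 0;

static void down_read_fast(void)
{
        /* atomic_fetch_add returns the prior value, like fetchadd4.acq */
        int old = atomic_fetch_add_explicit(&count, 1, memory_order_acquire);
        if (old < 0)
                puts("contended: the kernel would take the slow path here");
}

static void up_read_fast(void)
{
        int old = atomic_fetch_sub_explicit(&count, 1, memory_order_release);
        if (old < 0)
                puts("contended: the kernel would consider waking waiters");
}

int main(void)
{
        down_read_fast();
        up_read_fast();
        return 0;
}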
......
@@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
 extern unsigned long sal_platform_features;
+struct sal_ret_values {
+        long r8; long r9; long r10; long r11;
+};
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_IA64_PAL_H */
@@ -79,7 +79,6 @@ typedef struct siginfo {
  * si_code is non-zero and __ISR_VALID is set in si_flags.
  */
 #define si_isr		_sifields._sigfault._isr
-#define si_pfm_ovfl	_sifields._sigprof._pfm_ovfl_counters
 /*
  * Flag values for si_flags:
......
@@ -106,7 +106,7 @@ hard_smp_processor_id (void)
 		unsigned long bits;
 	} lid;
-	lid.bits = ia64_get_lid();
+	lid.bits = ia64_getreg(_IA64_REG_CR_LID);
 	return lid.f.id << 8 | lid.f.eid;
 }
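
The lid union here packs an 8-bit eid in bits 16-23 of cr.lid and an 8-bit id in bits 24-31, which is also why cpu_physical_id() elsewhere in this patch extracts (lid >> 16) & 0xffff. The same fold done with explicit shifts, assuming that bit placement:

#include <stdint.h>
#include <stdio.h>

static unsigned hard_id_from_lid(uint64_t lid)
{
        unsigned eid = (lid >> 16) & 0xff;      /* bits 16-23 */
        unsigned id  = (lid >> 24) & 0xff;      /* bits 24-31 */
        return (id << 8) | eid;                 /* matches lid.f.id << 8 | lid.f.eid */
}

int main(void)
{
        uint64_t lid = (0x12ULL << 24) | (0x34ULL << 16);
        printf("%#x\n", hard_id_from_lid(lid)); /* prints 0x1234 */
        return 0;
}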
......
@@ -11,11 +11,23 @@
 extern void * sn_io_addr(unsigned long port); /* Forward definition */
 extern void sn_mmiob(void);                   /* Forward definition */
+#include <asm/intrinsics.h>
-#define __sn_mf_a()	__asm__ __volatile__ ("mf.a" ::: "memory")
+#define __sn_mf_a()	ia64_mfa()
 extern void sn_dma_flush(unsigned long);
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
 /*
  * The following routines are SN Platform specific, called when
  * a reference is made to inX/outX set macros.  SN Platform
@@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long);
  */
 static inline unsigned int
-__sn_inb (unsigned long port)
+___sn_inb (unsigned long port)
 {
 	volatile unsigned char *addr;
 	unsigned char ret = -1;
@@ -40,7 +52,7 @@ __sn_inb (unsigned long port)
 }
 static inline unsigned int
-__sn_inw (unsigned long port)
+___sn_inw (unsigned long port)
 {
 	volatile unsigned short *addr;
 	unsigned short ret = -1;
@@ -54,7 +66,7 @@ __sn_inw (unsigned long port)
 }
 static inline unsigned int
-__sn_inl (unsigned long port)
+___sn_inl (unsigned long port)
 {
 	volatile unsigned int *addr;
 	unsigned int ret = -1;
@@ -68,7 +80,7 @@ __sn_inl (unsigned long port)
 }
 static inline void
-__sn_outb (unsigned char val, unsigned long port)
+___sn_outb (unsigned char val, unsigned long port)
 {
 	volatile unsigned char *addr;
@@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port)
 }
 static inline void
-__sn_outw (unsigned short val, unsigned long port)
+___sn_outw (unsigned short val, unsigned long port)
 {
 	volatile unsigned short *addr;
@@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port)
 }
 static inline void
-__sn_outl (unsigned int val, unsigned long port)
+___sn_outl (unsigned int val, unsigned long port)
 {
 	volatile unsigned int *addr;
@@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port)
  */
 static inline unsigned char
-__sn_readb (void *addr)
+___sn_readb (void *addr)
 {
 	unsigned char val;
@@ -121,7 +133,7 @@ __sn_readb (void *addr)
 }
 static inline unsigned short
-__sn_readw (void *addr)
+___sn_readw (void *addr)
 {
 	unsigned short val;
@@ -132,7 +144,7 @@ __sn_readw (void *addr)
 }
 static inline unsigned int
-__sn_readl (void *addr)
+___sn_readl (void *addr)
 {
 	unsigned int val;
@@ -143,7 +155,7 @@ __sn_readl (void *addr)
 }
 static inline unsigned long
-__sn_readq (void *addr)
+___sn_readq (void *addr)
 {
 	unsigned long val;
......
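
The ___sn_* renames plus the block of #define __sn_inb ___sn_inb lines above form a macro indirection: the header keeps its default inline under the triple-underscore name and binds the public double-underscore name to it, so other code can rebind the public name without losing the default implementation. The pattern in miniature, with illustrative names:

#include <stdio.h>

/* Default implementation, kept under a private name. */
static inline unsigned int ___my_inb(unsigned long port)
{
        return (unsigned int)(port & 0xff);     /* stand-in for a real MMIO read */
}

/* Public name bound by macro; a later header could #undef and rebind it. */
#define __my_inb ___my_inb

int main(void)
{
        printf("%u\n", __my_inb(0x3f8));
        return 0;
}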
@@ -89,7 +89,7 @@
 #ifndef CONFIG_SMP
 #define cpu_logical_id(cpu)	0
-#define cpu_physical_id(cpuid)	((ia64_get_lid() >> 16) & 0xffff)
+#define cpu_physical_id(cpuid)	((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
 #endif
 /*
@@ -98,8 +98,8 @@
  */
 #define cpu_physical_id_to_nasid(cpi)	((cpi) &0xfff)
 #define cpu_physical_id_to_slice(cpi)	((cpi>>12) & 3)
-#define get_nasid()	((ia64_get_lid() >> 16) & 0xfff)
-#define get_slice()	((ia64_get_lid() >> 28) & 0xf)
+#define get_nasid()	((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff)
+#define get_slice()	((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf)
 #define get_node_number(addr)	(((unsigned long)(addr)>>38) & 0x7ff)
 /*
......
@@ -9,11 +9,13 @@
  * This file is used for SMP configurations only.
  */
+#include <linux/compiler.h>
 #include <linux/kernel.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
 #include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/intrinsics.h>
+#include <asm/system.h>
 typedef struct {
 	volatile unsigned int lock;
@@ -102,8 +104,8 @@ typedef struct {
 do {                                                                            \
 	rwlock_t *__read_lock_ptr = (rw);                                       \
                                                                                 \
-	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) {  \
-		ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");              \
+	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {  \
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                \
 		while (*(volatile int *)__read_lock_ptr < 0)                    \
 			cpu_relax();                                            \
 	}                                                                       \
@@ -112,7 +114,7 @@ do { \
 #define _raw_read_unlock(rw)                                    \
 do {                                                            \
 	rwlock_t *__read_lock_ptr = (rw);                       \
-	ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");      \
+	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
 } while (0)
 #define _raw_write_lock(rw) \
......
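
The reader-lock fast path above is optimistic: fetchadd +1 with acquire, and if the word had already gone negative (a writer holds the lock), undo with a releasing -1 and spin read-only until the sign clears before retrying. The same control flow with C11 atomics, as a sketch of the technique rather than a production lock:

#include <stdatomic.h>

static atomic_int rw = 0;

static void read_lock_sketch(void)
{
        while (atomic_fetch_add_explicit(&rw, 1, memory_order_acquire) < 0) {
                /* a writer is in: back out our increment ... */
                atomic_fetch_sub_explicit(&rw, 1, memory_order_release);
                /* ... and spin with plain loads until the sign clears */
                while (atomic_load_explicit(&rw, memory_order_relaxed) < 0)
                        ;       /* cpu_relax() in the kernel version */
        }
}

static void read_unlock_sketch(void)
{
        atomic_fetch_sub_explicit(&rw, 1, memory_order_release);
}

int main(void)
{
        read_lock_sketch();
        read_unlock_sketch();
        return 0;
}

Spinning on a plain load before retrying the fetchadd keeps the cache line shared instead of bouncing it between CPUs on every attempt.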
@@ -55,12 +55,6 @@ extern struct ia64_boot_param {
 	__u64 initrd_size;
 } *ia64_boot_param;
-static inline void
-ia64_insn_group_barrier (void)
-{
-        __asm__ __volatile__ (";;" ::: "memory");
-}
 /*
  * Macros to force memory ordering.  In these descriptions, "previous"
  * and "subsequent" refer to program order; "visible" means that all
@@ -83,7 +77,7 @@ ia64_insn_group_barrier (void)
  * it's (presumably) much slower than mf and (b) mf.a is supported for
  * sequential memory pages only.
  */
-#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
+#define mb()	ia64_mf()
 #define rmb()	mb()
 #define wmb()	mb()
 #define read_barrier_depends()	do { } while(0)
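
mb() compiles to a single mf, a full two-way fence: it orders a data write before the flag store that publishes it, and the flag load before the data read that consumes it. A user-space analog of that pairing using C11 fences in place of mf:

#include <stdatomic.h>
#include <stdio.h>

static int data;
static atomic_int ready;

static void producer(void)
{
        data = 42;
        atomic_thread_fence(memory_order_seq_cst);      /* plays the role of mb() */
        atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

static void consumer(void)
{
        if (atomic_load_explicit(&ready, memory_order_relaxed)) {
                atomic_thread_fence(memory_order_seq_cst);  /* plays the role of rmb() */
                printf("%d\n", data);
        }
}

int main(void)
{
        producer();
        consumer();
        return 0;
}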
@@ -119,22 +113,26 @@
 /* clearing psr.i is implicitly serialized (visible by next insn) */
 /* setting psr.i requires data serialization */
-#define __local_irq_save(x)	__asm__ __volatile__ ("mov %0=psr;;"            \
-                                                      "rsm psr.i;;"            \
-                                                      : "=r" (x) :: "memory")
-#define __local_irq_disable()	__asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
-#define __local_irq_restore(x)	__asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;"    \
-                                                      "(p6) ssm psr.i;"         \
-                                                      "(p7) rsm psr.i;;"        \
-                                                      "(p6) srlz.d"             \
-                                                      :: "r" ((x) & IA64_PSR_I) \
-                                                      : "p6", "p7", "memory")
+#define __local_irq_save(x)                     \
+do {                                            \
+	(x) = ia64_getreg(_IA64_REG_PSR);       \
+	ia64_stop();                            \
+	ia64_rsm(IA64_PSR_I);                   \
+} while (0)
+#define __local_irq_disable()                   \
+do {                                            \
+	ia64_stop();                            \
+	ia64_rsm(IA64_PSR_I);                   \
+} while (0)
+#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;
-# define __save_ip()	__asm__ ("mov %0=ip" : "=r" (last_cli_ip))
+# define __save_ip()	last_cli_ip = ia64_getreg(_IA64_REG_IP)
 # define local_irq_save(x)                                      \
 do {                                                            \
@@ -164,14 +162,14 @@ do { \
 # define local_irq_restore(x)	__local_irq_restore(x)
 #endif /* !CONFIG_IA64_DEBUG_IRQ */
-#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
-#define local_save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
+#define local_irq_enable()	({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_PSR))
 #define irqs_disabled()                         \
 ({                                              \
-	unsigned long flags;                    \
-	local_save_flags(flags);                \
-	(flags & IA64_PSR_I) == 0;              \
+	unsigned long __ia64_id_flags;          \
+	local_save_flags(__ia64_id_flags);      \
+	(__ia64_id_flags & IA64_PSR_I) == 0;    \
 })
 #ifdef __KERNEL__
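
__local_irq_restore() deliberately re-disables when the saved PSR.i bit was 0 (that is what the old (p6) ssm / (p7) rsm predication did): restore must return interrupts to their state at save time, or nested save/disable pairs would re-enable too early. A small user-space model of that discipline with a fake psr_i flag; my_irq_save/my_irq_restore are illustrative, not the kernel macros:

#include <stdio.h>

static int psr_i = 1;                           /* 1 = interrupts enabled */

#define my_irq_save(x)    do { (x) = psr_i; psr_i = 0; } while (0)
#define my_irq_restore(x) do { psr_i = (x); } while (0)   /* conditional, not blind enable */

int main(void)
{
        int outer, inner;
        my_irq_save(outer);     /* disables; remembers "was enabled" */
        my_irq_save(inner);     /* nested: remembers "was disabled" */
        my_irq_restore(inner);
        printf("after inner restore: psr_i=%d\n", psr_i);  /* 0: still disabled */
        my_irq_restore(outer);
        printf("after outer restore: psr_i=%d\n", psr_i);  /* 1: enabled again */
        return 0;
}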
......
@@ -10,6 +10,7 @@
  * Also removed cacheflush_time as it's entirely unused.
  */
+#include <asm/intrinsics.h>
 #include <asm/processor.h>
 typedef unsigned long cycles_t;
@@ -32,7 +33,7 @@ get_cycles (void)
 {
 	cycles_t ret;
-	__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret));
+	ret = ia64_getreg(_IA64_REG_AR_ITC);
 	return ret;
 }
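
get_cycles() is meant for cheap interval measurement: read ar.itc before and after a stretch of work and subtract. The same pattern on an x86-64 host, with rdtsc standing in for ar.itc (illustrative only; returns 0 elsewhere):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t cycles(void)
{
#if defined(__x86_64__)
        uint32_t lo, hi;
        __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
        return ((uint64_t)hi << 32) | lo;
#else
        return 0;       /* placeholder on other architectures */
#endif
}

int main(void)
{
        uint64_t t0 = cycles();
        volatile long sink = 0;
        for (int i = 0; i < 1000; i++)
                sink += i;
        printf("~%llu cycles\n", (unsigned long long)(cycles() - t0));
        return 0;
}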
......
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
+#include <asm/intrinsics.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
@@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
 #else
 	if (vma->vm_mm == current->active_mm)
-		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+		ia64_ptcl(addr, (PAGE_SHIFT << 2));
 	else
 		vma->vm_mm->context = 0;
 #endif
......
@@ -334,73 +334,20 @@ waitpid (int pid, int * wait_stat, int flags)
 }
-static inline int
-execve (const char *filename, char *const av[], char *const ep[])
-{
-        register long r8 asm("r8");
-        register long r10 asm("r10");
-        register long r15 asm("r15") = __NR_execve;
-        register long out0 asm("out0") = (long)filename;
-        register long out1 asm("out1") = (long)av;
-        register long out2 asm("out2") = (long)ep;
-        asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
-                      : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1), "=r" (out2)
-                      : "2" (r15), "3" (out0), "4" (out1), "5" (out2)
-                      : "memory", "out3", "out4", "out5", "out6", "out7",
-                        /* Non-stacked integer registers, minus r8, r10, r15, r13 */
-                        "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
-                        "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
-                        "r28", "r29", "r30", "r31",
-                        /* Predicate registers. */
-                        "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
-                        /* Non-rotating fp registers. */
-                        "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
-                        /* Branch registers. */
-                        "b6", "b7");
-        return r8;
-}
-static inline pid_t
-clone (unsigned long flags, void *sp)
-{
-        register long r8 asm("r8");
-        register long r10 asm("r10");
-        register long r15 asm("r15") = __NR_clone;
-        register long out0 asm("out0") = (long)flags;
-        register long out1 asm("out1") = (long)sp;
-        long retval;
-        /* clone clobbers current, hence the "r13" in the clobbers list */
-        asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
-                      : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1)
-                      : "2" (r15), "3" (out0), "4" (out1)
-                      : "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13",
-                        /* Non-stacked integer registers, minus r8, r10, r15, r13 */
-                        "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
-                        "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
-                        "r28", "r29", "r30", "r31",
-                        /* Predicate registers. */
-                        "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
-                        /* Non-rotating fp registers. */
-                        "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
-                        /* Branch registers. */
-                        "b6", "b7");
-        retval = r8;
-        return retval;
-}
+extern int execve (const char *filename, char *const av[], char *const ep[]);
+extern pid_t clone (unsigned long flags, void *sp);
 #endif /* __KERNEL_SYSCALLS__ */
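
With the stubs demoted to externs, the register setup and the exhaustive clobber bookkeeping above live in one out-of-line implementation instead of being replayed (and kept in sync) at every inline call site; callers just use ordinary prototypes. The user-space equivalent of such a call, via the libc prototypes:

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        if (pid == 0) {
                char *const argv[] = { "/bin/echo", "hello", 0 };
                char *const envp[] = { 0 };
                execve("/bin/echo", argv, envp);
                _exit(127);     /* only reached if execve failed */
        }
        waitpid(pid, 0, 0);
        return 0;
}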
 /*
  * "Conditional" syscalls
  *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on
- * all toolchains, so we just do it by hand.  Note, this macro can only be used in the
- * file which defines sys_ni_syscall, i.e., in kernel/sys.c.
+ * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
+ * kernel/sys.c.  This version causes warnings because the declaration isn't a
+ * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
+ * declarations have prototypes at the moment.
  */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall")));
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
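
The new cond_syscall() uses exactly the weak-alias attribute the old comment said it wanted: if a real definition of the symbol is linked in, it wins; otherwise the name resolves to sys_ni_syscall. The trick in isolation, for GCC on ELF targets (sys_foo here is illustrative):

#include <stdio.h>

long sys_ni_syscall(void)
{
        return -38;     /* -ENOSYS */
}

/* What cond_syscall(sys_foo) expands to, in spirit: a weak alias that a
 * strong definition elsewhere would override. */
long sys_foo(void) __attribute__((weak, alias("sys_ni_syscall")));

int main(void)
{
        printf("sys_foo() = %ld\n", sys_foo());  /* -38 unless overridden */
        return 0;
}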
......