Commit 70d68bd3 authored by Linus Torvalds's avatar Linus Torvalds

v2.4.7.3 -> v2.4.7.4

  - David Mosberger: IA64 update
  - Geert Uytterhoeven: cleanup, new atyfb
  - Marcelo Tosatti: zone aging fixes
  - me, others: limit IO requests sanely
parent 48ad999d
......@@ -3245,6 +3245,18 @@ CONFIG_FB_ATY
module will be called atyfb.o. If you want to compile it as a
module, say M here and read Documentation/modules.txt.
ATI Mach64 GX display support (EXPERIMENTAL)
CONFIG_FB_ATY_GX
This option adds support for the first generation ATI Mach64
graphics chips, i.e. the Mach64 GX and CX. Note that this support is
limited.
ATI Mach64 CT/VT/GT/LT display support (EXPERIMENTAL)
CONFIG_FB_ATY_CT
This option adds support for ATI Mach64 graphics chips starting
with the Mach64 CT family. This includes the Mach64 VT (limited
support), GT (3D RAGE family), and LT.
ATI Rage128 display support (EXPERIMENTAL)
CONFIG_FB_ATY128
This driver supports graphics boards with the ATI Rage128 chips.
......
......@@ -608,6 +608,13 @@ M: nils@kernelconcepts.de
W: http://www.kernelconcepts.de/
S: Maintained
IA64 (Itanium) PLATFORM
P: David Mosberger-Tang
M: davidm@hpl.hp.com
L: linux-ia64@linuxia64.org
W: http://www.linuxia64.org/
S: Maintained
IBM MCA SCSI SUBSYSTEM DRIVER
P: Michael Lang
M: langa2@kph.uni-mainz.de
......
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 8
EXTRAVERSION =-pre3
EXTRAVERSION =-pre4
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
......@@ -14,18 +14,18 @@ AWK := awk
export AWK
LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds
AFLAGS += -Wa,-x
AFLAGS_KERNEL := -mconstant-gp
EXTRA =
CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
-funwind-tables -falign-functions=32
# -frename-registers (this crashes the Nov 17 compiler...)
CFLAGS := $(CFLAGS) -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 -falign-functions=32
CFLAGS_KERNEL := -mconstant-gp
ifeq ($(CONFIG_ITANIUM_ASTEP_SPECIFIC),y)
CFLAGS += -ma-step
GCC_VERSION=$(shell $(CROSS_COMPILE)$(HOSTCC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
ifneq ($(GCC_VERSION),2)
CFLAGS += -frename-registers
endif
ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
CFLAGS += -mb-step
endif
......
......@@ -87,9 +87,6 @@ _start (void)
asm volatile ("movl gp=__gp;;" ::: "memory");
asm volatile ("mov sp=%0" :: "r"(stack) : "memory");
asm volatile ("bsw.1;;");
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
asm volatile ("nop 0;; nop 0;; nop 0;;");
#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
......
......@@ -26,6 +26,12 @@ define_bool CONFIG_SBUS n
define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
define_bool CONFIG_ACPI y
define_bool CONFIG_ACPI_INTERPRETER y
define_bool CONFIG_ACPI_KERNEL_CONFIG y
fi
choice 'IA-64 processor type' \
"Itanium CONFIG_ITANIUM \
McKinley CONFIG_MCKINLEY" Itanium
......@@ -44,7 +50,6 @@ choice 'Kernel page size' \
if [ "$CONFIG_ITANIUM" = "y" ]; then
define_bool CONFIG_IA64_BRL_EMU y
bool ' Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC
bool ' Enable Itanium B-step specific code' CONFIG_ITANIUM_BSTEP_SPECIFIC
if [ "$CONFIG_ITANIUM_BSTEP_SPECIFIC" = "y" ]; then
bool ' Enable Itanium B0-step specific code' CONFIG_ITANIUM_B0_SPECIFIC
......@@ -59,7 +64,7 @@ if [ "$CONFIG_ITANIUM" = "y" ]; then
if [ "$CONFIG_ITANIUM_CSTEP_SPECIFIC" = "y" ]; then
bool ' Enable Itanium C0-step specific code' CONFIG_ITANIUM_C0_SPECIFIC
fi
if [ "$CONFIG_ITANIUM_ASTEP_SPECIFIC" = "y" -o "$CONFIG_ITANIUM_B0_SPECIFIC" = "y" \
if [ "$CONFIG_ITANIUM_B0_SPECIFIC" = "y" \
-o "$CONFIG_ITANIUM_B1_SPECIFIC" = "y" -o "$CONFIG_ITANIUM_B2_SPECIFIC" = "y" ]; then
define_bool CONFIG_ITANIUM_PTCG n
else
......@@ -84,13 +89,7 @@ fi
if [ "$CONFIG_IA64_DIG" = "y" ]; then
bool ' Force interrupt redirection' CONFIG_IA64_HAVE_IRQREDIR
bool ' Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
bool ' Enable ACPI 2.0 with errata 1.3' CONFIG_ACPI20
bool ' ACPI kernel configuration manager (EXPERIMENTAL)' CONFIG_ACPI_KERNEL_CONFIG
if [ "$CONFIG_ACPI_KERNEL_CONFIG" = "y" ]; then
define_bool CONFIG_PM y
define_bool CONFIG_ACPI y
define_bool CONFIG_ACPI_INTERPRETER y
fi
define_bool CONFIG_PM y
fi
if [ "$CONFIG_IA64_SGI_SN1" = "y" ]; then
......@@ -112,7 +111,7 @@ define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore.
bool 'SMP support' CONFIG_SMP
bool 'Performance monitor support' CONFIG_PERFMON
tristate '/proc/pal support' CONFIG_IA64_PALINFO
tristate '/proc/efi support' CONFIG_IA64_EFIVARS
tristate '/proc/efi/vars support' CONFIG_EFI_VARS
bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC
......@@ -123,6 +122,8 @@ tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
source drivers/acpi/Config.in
bool 'PCI support' CONFIG_PCI
source drivers/pci/Config.in
......@@ -247,6 +248,10 @@ endmenu
source drivers/usb/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
fi # !HP_SIM
if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
......
......@@ -27,28 +27,15 @@ extern struct console hpsim_cons;
/*
* Simulator system call.
*/
inline long
ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr)
{
#ifdef __GCC_DOESNT_KNOW_IN_REGS__
register long in0 asm ("r32") = arg0;
register long in1 asm ("r33") = arg1;
register long in2 asm ("r34") = arg2;
register long in3 asm ("r35") = arg3;
#else
register long in0 asm ("in0") = arg0;
register long in1 asm ("in1") = arg1;
register long in2 asm ("in2") = arg2;
register long in3 asm ("in3") = arg3;
#endif
register long r8 asm ("r8");
register long r15 asm ("r15") = nr;
asm volatile ("break 0x80001"
: "=r"(r8)
: "r"(r15), "r"(in0), "r"(in1), "r"(in2), "r"(in3));
return r8;
}
asm (".text\n"
".align 32\n"
".global ia64_ssc\n"
".proc ia64_ssc\n"
"ia64_ssc:\n"
"mov r15=r36\n"
"break 0x80001\n"
"br.ret.sptk.many rp\n"
".endp\n");
void
ia64_ssc_connect_irq (long intr, long irq)
......
......@@ -11,7 +11,8 @@ all: ia32.o
O_TARGET := ia32.o
obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o ia32_support.o ia32_traps.o binfmt_elf32.o
obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o ia32_support.o ia32_traps.o \
binfmt_elf32.o ia32_ldt.o
clean::
......
......@@ -2,8 +2,11 @@
* IA-32 ELF support.
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2001 Hewlett-Packard Co
* Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state
* 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed
*/
#include <linux/config.h>
......@@ -35,8 +38,8 @@
#undef CLOCKS_PER_SEC
#define CLOCKS_PER_SEC IA32_CLOCKS_PER_SEC
extern void ia64_elf32_init(struct pt_regs *regs);
extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address);
extern void ia64_elf32_init (struct pt_regs *regs);
extern void put_dirty_page (struct task_struct * tsk, struct page *page, unsigned long address);
#define ELF_PLAT_INIT(_r) ia64_elf32_init(_r)
#define setup_arg_pages(bprm) ia32_setup_arg_pages(bprm)
......@@ -49,7 +52,7 @@ extern void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned
unsigned long *ia32_gdt_table, *ia32_tss;
struct page *
put_shared_page(struct task_struct * tsk, struct page *page, unsigned long address)
put_shared_page (struct task_struct * tsk, struct page *page, unsigned long address)
{
pgd_t * pgd;
pmd_t * pmd;
......@@ -83,85 +86,99 @@ put_shared_page(struct task_struct * tsk, struct page *page, unsigned long addre
return 0;
}
void ia64_elf32_init(struct pt_regs *regs)
void
ia64_elf32_init (struct pt_regs *regs)
{
struct vm_area_struct *vma;
int nr;
put_shared_page(current, virt_to_page(ia32_gdt_table), IA32_PAGE_OFFSET);
/*
* Map GDT and TSS below 4GB, where the processor can find them. We need to map
* it with privilege level 3 because the IVE uses non-privileged accesses to these
* tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
*/
put_shared_page(current, virt_to_page(ia32_gdt_table), IA32_GDT_OFFSET);
if (PAGE_SHIFT <= IA32_PAGE_SHIFT)
put_shared_page(current, virt_to_page(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
put_shared_page(current, virt_to_page(ia32_tss), IA32_TSS_OFFSET);
nr = smp_processor_id();
/*
* Install LDT as anonymous memory. This gives us all-zero segment descriptors
* until a task modifies them via modify_ldt().
*/
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_LDT_OFFSET;
vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
vma->vm_ops = NULL;
vma->vm_pgoff = 0;
vma->vm_file = NULL;
vma->vm_private_data = NULL;
insert_vm_struct(current->mm, vma);
}
/* Do all the IA-32 setup here */
nr = smp_processor_id();
current->thread.map_base = 0x40000000;
current->thread.task_size = 0xc0000000; /* use what Linux/x86 uses... */
current->thread.map_base = IA32_PAGE_OFFSET/3;
current->thread.task_size = IA32_PAGE_OFFSET; /* use what Linux/x86 uses... */
set_fs(USER_DS); /* set addr limit for new TASK_SIZE */
/* setup ia32 state for ia32_load_state */
/* Setup the segment selectors */
regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */
current->thread.eflag = IA32_EFLAG;
current->thread.csd = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L, 3L, 1L, 1L, 1L);
current->thread.ssd = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
current->thread.tssd = IA64_SEG_DESCRIPTOR(IA32_PAGE_OFFSET + PAGE_SIZE, 0x1FFFL, 0xBL,
1L, 3L, 1L, 1L, 1L);
/* CS descriptor */
__asm__("mov ar.csd = %0" : /* no outputs */
: "r" IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L,
3L, 1L, 1L, 1L));
/* SS descriptor */
__asm__("mov ar.ssd = %0" : /* no outputs */
: "r" IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L,
3L, 1L, 1L, 1L));
/* EFLAGS */
__asm__("mov ar.eflag = %0" : /* no outputs */ : "r" (IA32_EFLAG));
/* Control registers */
__asm__("mov ar.fsr = %0"
: /* no outputs */
: "r" ((ulong)IA32_FSR_DEFAULT));
__asm__("mov ar.fcr = %0"
: /* no outputs */
: "r" ((ulong)IA32_FCR_DEFAULT));
__asm__("mov ar.fir = r0");
__asm__("mov ar.fdr = r0");
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
/* Get the segment selectors right */
regs->r16 = (__USER_DS << 16) | (__USER_DS); /* ES == DS, GS, FS are zero */
regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32)
| (__USER_DS << 16) | __USER_CS;
/* Setup other segment descriptors - ESD, DSD, FSD, GSD */
regs->r24 = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
regs->r27 = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
regs->r28 = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
regs->r29 = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
/* Setup the LDT and GDT */
regs->r30 = ia32_gdt_table[_LDT(nr)];
regs->r31 = IA64_SEG_DESCRIPTOR(0xc0000000L, 0x400L, 0x3L, 1L, 3L,
1L, 1L, 1L);
/* Clear psr.ac */
regs->cr_ipsr &= ~IA64_PSR_AC;
/* Setup the segment descriptors */
regs->r24 = IA32_SEG_UNSCRAMBLE(ia32_gdt_table[__USER_DS >> 3]); /* ESD */
regs->r27 = IA32_SEG_UNSCRAMBLE(ia32_gdt_table[__USER_DS >> 3]); /* DSD */
regs->r28 = 0; /* FSD (null) */
regs->r29 = 0; /* GSD (null) */
regs->r30 = IA32_SEG_UNSCRAMBLE(ia32_gdt_table[_LDT(nr)]); /* LDTD */
/*
* Setup GDTD. Note: GDTD is the descrambled version of the pseudo-descriptor
* format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
* architecture manual.
*/
regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1, 0,
0, 0, 0, 0, 0, 0));
ia64_psr(regs)->ac = 0; /* turn off alignment checking */
regs->loadrs = 0;
}
/*
* According to the ABI %edx points to an `atexit' handler. Since we don't have
* one we'll set it to 0 and initialize all the other registers just to make
* things more deterministic, ala the i386 implementation.
*/
regs->r8 = 0; /* %eax */
regs->r11 = 0; /* %ebx */
regs->r9 = 0; /* %ecx */
regs->r10 = 0; /* %edx */
regs->r13 = 0; /* %ebp */
regs->r14 = 0; /* %esi */
regs->r15 = 0; /* %edi */
#undef STACK_TOP
#define STACK_TOP ((IA32_PAGE_OFFSET/3) * 2)
current->thread.eflag = IA32_EFLAG;
current->thread.fsr = IA32_FSR_DEFAULT;
current->thread.fcr = IA32_FCR_DEFAULT;
current->thread.fir = 0;
current->thread.fdr = 0;
current->thread.csd = IA32_SEG_UNSCRAMBLE(ia32_gdt_table[__USER_CS >> 3]);
current->thread.ssd = IA32_SEG_UNSCRAMBLE(ia32_gdt_table[__USER_DS >> 3]);
current->thread.tssd = IA32_SEG_UNSCRAMBLE(ia32_gdt_table[_TSS(nr)]);
ia32_load_state(current);
}
int ia32_setup_arg_pages(struct linux_binprm *bprm)
int
ia32_setup_arg_pages (struct linux_binprm *bprm)
{
unsigned long stack_base;
struct vm_area_struct *mpnt;
int i;
stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
bprm->p += stack_base;
if (bprm->loader)
......@@ -175,7 +192,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm)
{
mpnt->vm_mm = current->mm;
mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
mpnt->vm_end = STACK_TOP;
mpnt->vm_end = IA32_STACK_TOP;
mpnt->vm_page_prot = PAGE_COPY;
mpnt->vm_flags = VM_STACK_FLAGS;
mpnt->vm_ops = NULL;
......@@ -197,15 +214,15 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm)
}
static unsigned long
ia32_mm_addr(unsigned long addr)
ia32_mm_addr (unsigned long addr)
{
struct vm_area_struct *vma;
if ((vma = find_vma(current->mm, addr)) == NULL)
return(ELF_PAGESTART(addr));
return ELF_PAGESTART(addr);
if (vma->vm_start > addr)
return(ELF_PAGESTART(addr));
return(ELF_PAGEALIGN(addr));
return ELF_PAGESTART(addr);
return ELF_PAGEALIGN(addr);
}
/*
......@@ -232,22 +249,9 @@ elf_map32 (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int p
*/
if (addr == 0)
addr += PAGE_SIZE;
#if 1
set_brk(ia32_mm_addr(addr), addr + eppnt->p_memsz);
memset((char *) addr + eppnt->p_filesz, 0, eppnt->p_memsz - eppnt->p_filesz);
kernel_read(filep, eppnt->p_offset, (char *) addr, eppnt->p_filesz);
retval = (unsigned long) addr;
#else
/* doesn't work yet... */
# define IA32_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_EXEC_PAGESIZE-1))
# define IA32_PAGEOFFSET(_v) ((_v) & (ELF_EXEC_PAGESIZE-1))
# define IA32_PAGEALIGN(_v) (((_v) + ELF_EXEC_PAGESIZE - 1) & ~(ELF_EXEC_PAGESIZE - 1))
down_write(&current->mm->mmap_sem);
retval = ia32_do_mmap(filep, IA32_PAGESTART(addr),
eppnt->p_filesz + IA32_PAGEOFFSET(eppnt->p_vaddr), prot, type,
eppnt->p_offset - IA32_PAGEOFFSET(eppnt->p_vaddr));
up_write(&current->mm->mmap_sem);
#endif
return retval;
}
......@@ -140,7 +140,7 @@ ia32_syscall_table:
data8 sys_lchown
data8 sys32_ni_syscall /* old break syscall holder */
data8 sys32_ni_syscall
data8 sys_lseek
data8 sys32_lseek
data8 sys_getpid /* 20 */
data8 sys_mount
data8 sys_oldumount
......@@ -233,7 +233,7 @@ ia32_syscall_table:
data8 sys32_ni_syscall
data8 sys_iopl /* 110 */
data8 sys_vhangup
data8 sys32_ni_syscall // used to be sys_idle
data8 sys32_ni_syscall /* used to be sys_idle */
data8 sys32_ni_syscall
data8 sys32_wait4
data8 sys_swapoff /* 115 */
......@@ -244,7 +244,7 @@ ia32_syscall_table:
data8 sys_clone /* 120 */
data8 sys_setdomainname
data8 sys32_newuname
data8 sys_modify_ldt
data8 sys32_modify_ldt
data8 sys_adjtimex
data8 sys32_mprotect /* 125 */
data8 sys_sigprocmask
......@@ -286,13 +286,13 @@ ia32_syscall_table:
data8 sys32_nanosleep
data8 sys_mremap
data8 sys_setresuid
data8 sys_getresuid /* 165 */
data8 sys32_getresuid /* 165 */
data8 sys_vm86
data8 sys_query_module
data8 sys_poll
data8 sys_nfsservctl
data8 sys_setresgid /* 170 */
data8 sys_getresgid
data8 sys32_getresgid
data8 sys_prctl
data8 sys32_rt_sigreturn
data8 sys32_rt_sigaction
......
/*
* Copyright (C) 2001 Hewlett-Packard Co
* Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
*
* Adapted from arch/i386/kernel/ldt.c
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/ia32.h>
/*
* read_ldt() is not really atomic - this is not a problem since synchronization of reads
* and writes done to the LDT has to be assured by user-space anyway. Writes are atomic,
* to protect the security checks done on new descriptors.
*/
static int
read_ldt (void *ptr, unsigned long bytecount)
{
char *src, *dst, buf[256]; /* temporary buffer (don't overflow kernel stack!) */
unsigned long bytes_left, n;
if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE)
bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE;
bytes_left = bytecount;
src = (void *) IA32_LDT_OFFSET;
dst = ptr;
while (bytes_left) {
n = sizeof(buf);
if (n > bytes_left)
n = bytes_left;
/*
* We know we're reading valid memory, but we still must guard against
* running out of memory.
*/
if (__copy_from_user(buf, src, n))
return -EFAULT;
if (copy_to_user(dst, buf, n))
return -EFAULT;
src += n;
dst += n;
bytes_left -= n;
}
return bytecount;
}
static int
write_ldt (void * ptr, unsigned long bytecount, int oldmode)
{
struct ia32_modify_ldt_ldt_s ldt_info;
__u64 entry;
if (bytecount != sizeof(ldt_info))
return -EINVAL;
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
return -EFAULT;
if (ldt_info.entry_number >= IA32_LDT_ENTRIES)
return -EINVAL;
if (ldt_info.contents == 3) {
if (oldmode)
return -EINVAL;
if (ldt_info.seg_not_present == 0)
return -EINVAL;
}
if (ldt_info.base_addr == 0 && ldt_info.limit == 0
&& (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1
&& ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0
&& ldt_info.seg_not_present == 1 && ldt_info.useable == 0)))
/* allow LDTs to be cleared by the user */
entry = 0;
else
/* we must set the "Accessed" bit as IVE doesn't emulate it */
entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit,
(((ldt_info.read_exec_only ^ 1) << 1)
| (ldt_info.contents << 2)) | 1,
1, 3, ldt_info.seg_not_present ^ 1,
(oldmode ? 0 : ldt_info.useable),
ldt_info.seg_32bit,
ldt_info.limit_in_pages);
/*
* Install the new entry. We know we're accessing valid (mapped) user-level
* memory, but we still need to guard against out-of-memory, hence we must use
* put_user().
*/
return __put_user(entry, (__u64 *) IA32_LDT_OFFSET + ldt_info.entry_number);
}
asmlinkage int
sys32_modify_ldt (int func, void *ptr, unsigned int bytecount)
{
int ret = -ENOSYS;
switch (func) {
case 0:
ret = read_ldt(ptr, bytecount);
break;
case 1:
ret = write_ldt(ptr, bytecount, 1);
break;
case 0x11:
ret = write_ldt(ptr, bytecount, 0);
break;
}
return ret;
}
/*
* IA32 helper functions
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
* Copyright (C) 2001 Hewlett-Packard Co
* Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
* 02/19/01 D. Mosberger dropped tssd; it's not needed
*/
......@@ -21,7 +26,7 @@ extern unsigned long *ia32_gdt_table, *ia32_tss;
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
void
ia32_save_state (struct thread_struct *thread)
ia32_save_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;
......@@ -33,28 +38,30 @@ ia32_save_state (struct thread_struct *thread)
"mov %5=ar.csd;"
"mov %6=ar.ssd;"
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr), "=r"(csd), "=r"(ssd));
thread->eflag = eflag;
thread->fsr = fsr;
thread->fcr = fcr;
thread->fir = fir;
thread->fdr = fdr;
thread->csd = csd;
thread->ssd = ssd;
asm ("mov ar.k0=%0 ;;" :: "r"(thread->old_iob));
t->thread.eflag = eflag;
t->thread.fsr = fsr;
t->thread.fcr = fcr;
t->thread.fir = fir;
t->thread.fdr = fdr;
t->thread.csd = csd;
t->thread.ssd = ssd;
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
}
void
ia32_load_state (struct thread_struct *thread)
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;
struct pt_regs *regs = ia64_task_regs(t);
int nr;
eflag = thread->eflag;
fsr = thread->fsr;
fcr = thread->fcr;
fir = thread->fir;
fdr = thread->fdr;
csd = thread->csd;
ssd = thread->ssd;
eflag = t->thread.eflag;
fsr = t->thread.fsr;
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
csd = t->thread.csd;
ssd = t->thread.ssd;
asm volatile ("mov ar.eflag=%0;"
"mov ar.fsr=%1;"
......@@ -64,17 +71,22 @@ ia32_load_state (struct thread_struct *thread)
"mov ar.csd=%5;"
"mov ar.ssd=%6;"
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr), "r"(csd), "r"(ssd));
asm ("mov %0=ar.k0 ;;" : "=r"(thread->old_iob));
asm ("mov ar.k0=%0 ;;" :: "r"(IA32_IOBASE));
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
/* load TSS and LDT while preserving SS and CS: */
nr = smp_processor_id();
regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
}
/*
* Setup IA32 GDT and TSS
* Setup IA32 GDT and TSS
*/
void
ia32_gdt_init(void)
ia32_gdt_init (void)
{
unsigned long gdt_and_tss_page;
unsigned long gdt_and_tss_page, ldt_size;
int nr;
/* allocate two IA-32 pages of memory: */
gdt_and_tss_page = __get_free_pages(GFP_KERNEL,
......@@ -86,17 +98,28 @@ ia32_gdt_init(void)
/* Zero the gdt and tss */
memset((void *) gdt_and_tss_page, 0, 2*IA32_PAGE_SIZE);
/* CS descriptor in IA-32 format */
ia32_gdt_table[4] = IA32_SEG_DESCRIPTOR(0L, 0xBFFFFFFFL, 0xBL, 1L,
3L, 1L, 1L, 1L, 1L);
/* DS descriptor in IA-32 format */
ia32_gdt_table[5] = IA32_SEG_DESCRIPTOR(0L, 0xBFFFFFFFL, 0x3L, 1L,
3L, 1L, 1L, 1L, 1L);
/* CS descriptor in IA-32 (scrambled) format */
ia32_gdt_table[__USER_CS >> 3] =
IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET - 1) >> IA32_PAGE_SHIFT,
0xb, 1, 3, 1, 1, 1, 1);
/* DS descriptor in IA-32 (scrambled) format */
ia32_gdt_table[__USER_DS >> 3] =
IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET - 1) >> IA32_PAGE_SHIFT,
0x3, 1, 3, 1, 1, 1, 1);
/* We never change the TSS and LDT descriptors, so we can share them across all CPUs. */
ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
for (nr = 0; nr < NR_CPUS; ++nr) {
ia32_gdt_table[_TSS(nr)] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
0xb, 0, 3, 1, 1, 1, 0);
ia32_gdt_table[_LDT(nr)] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
0x2, 0, 3, 1, 1, 1, 0);
}
}
/*
* Handle bad IA32 interrupt via syscall
* Handle bad IA32 interrupt via syscall
*/
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
......@@ -106,8 +129,7 @@ ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
die_if_kernel("Bad IA-32 interrupt", regs, int_num);
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = int_num; /* XXX is it legal to abuse si_errno like this? */
siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
siginfo.si_code = TRAP_BRKPT;
force_sig_info(SIGTRAP, &siginfo, current);
}
This diff is collapsed.
......@@ -13,13 +13,13 @@ O_TARGET := kernel.o
export-objs := ia64_ksyms.o
obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o irq.o irq_ia64.o irq_sapic.o ivt.o \
obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o ivt.o \
machvec.o pal.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o \
signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o iosapic.o
obj-$(CONFIG_IA64_DIG) += iosapic.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IA64_EFIVARS) += efivars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
......
......@@ -25,14 +25,12 @@
#include <linux/irq.h>
#include <asm/acpi-ext.h>
#include <asm/acpikcfg.h>
#include <asm/efi.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/page.h>
#ifdef CONFIG_ACPI_KERNEL_CONFIG
# include <asm/acpikcfg.h>
#endif
#undef ACPI_DEBUG /* Guess what this does? */
......@@ -40,7 +38,8 @@
int __initdata available_cpus;
int __initdata total_cpus;
void (*pm_idle)(void);
void (*pm_idle) (void);
void (*pm_power_off) (void);
asm (".weak iosapic_register_legacy_irq");
asm (".weak iosapic_init");
......@@ -206,11 +205,21 @@ acpi20_parse_madt (acpi_madt_t *madt)
case ACPI20_ENTRY_IO_SAPIC:
iosapic = (acpi_entry_iosapic_t *) p;
if (iosapic_init)
iosapic_init(iosapic->address, iosapic->irq_base);
/*
* The PCAT_COMPAT flag indicates that the system has a
* dual-8259 compatible setup.
*/
iosapic_init(iosapic->address, iosapic->irq_base,
#ifdef CONFIG_ITANIUM
1 /* fw on some Itanium systems is broken... */
#else
(madt->flags & MADT_PCAT_COMPAT)
#endif
);
break;
case ACPI20_ENTRY_PLATFORM_INT_SOURCE:
printk("ACPI 2.0 MADT: PLATFORM INT SOUCE\n");
printk("ACPI 2.0 MADT: PLATFORM INT SOURCE\n");
acpi20_platform(p);
break;
......@@ -257,6 +266,7 @@ acpi20_parse_madt (acpi_madt_t *madt)
int __init
acpi20_parse (acpi20_rsdp_t *rsdp20)
{
# ifdef CONFIG_ACPI
acpi_xsdt_t *xsdt;
acpi_desc_table_hdr_t *hdrp;
int tables, i;
......@@ -287,9 +297,7 @@ acpi20_parse (acpi20_rsdp_t *rsdp20)
hdrp->oem_revision >> 16,
hdrp->oem_revision & 0xffff);
#ifdef CONFIG_ACPI_KERNEL_CONFIG
acpi_cf_init((void *)rsdp20);
#endif
tables =(hdrp->length -sizeof(acpi_desc_table_hdr_t))>>3;
......@@ -305,17 +313,16 @@ acpi20_parse (acpi20_rsdp_t *rsdp20)
acpi20_parse_madt((acpi_madt_t *) hdrp);
}
#ifdef CONFIG_ACPI_KERNEL_CONFIG
acpi_cf_terminate();
#endif
#ifdef CONFIG_SMP
# ifdef CONFIG_SMP
if (available_cpus == 0) {
printk("ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = total_cpus;
#endif
# endif
# endif /* CONFIG_ACPI */
return 1;
}
/*
......@@ -395,7 +402,12 @@ acpi_parse_msapic (acpi_sapic_t *msapic)
case ACPI_ENTRY_IO_SAPIC:
iosapic = (acpi_entry_iosapic_t *) p;
if (iosapic_init)
iosapic_init(iosapic->address, iosapic->irq_base);
/*
* The ACPI I/O SAPIC table doesn't have a PCAT_COMPAT
* flag like the MADT table, but we can safely assume that
* ACPI 1.0b systems have a dual-8259 setup.
*/
iosapic_init(iosapic->address, iosapic->irq_base, 1);
break;
case ACPI_ENTRY_INT_SRC_OVERRIDE:
......@@ -421,6 +433,7 @@ acpi_parse_msapic (acpi_sapic_t *msapic)
int __init
acpi_parse (acpi_rsdp_t *rsdp)
{
# ifdef CONFIG_ACPI
acpi_rsdt_t *rsdt;
acpi_desc_table_hdr_t *hdrp;
long tables, i;
......@@ -439,9 +452,7 @@ acpi_parse (acpi_rsdp_t *rsdp)
printk("ACPI: %.6s %.8s %d.%d\n", rsdt->header.oem_id, rsdt->header.oem_table_id,
rsdt->header.oem_revision >> 16, rsdt->header.oem_revision & 0xffff);
#ifdef CONFIG_ACPI_KERNEL_CONFIG
acpi_cf_init(rsdp);
#endif
tables = (rsdt->header.length - sizeof(acpi_desc_table_hdr_t)) / 8;
for (i = 0; i < tables; i++) {
......@@ -454,16 +465,15 @@ acpi_parse (acpi_rsdp_t *rsdp)
acpi_parse_msapic((acpi_sapic_t *) hdrp);
}
#ifdef CONFIG_ACPI_KERNEL_CONFIG
acpi_cf_terminate();
#endif
#ifdef CONFIG_SMP
# ifdef CONFIG_SMP
if (available_cpus == 0) {
printk("ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = total_cpus;
#endif
# endif
# endif /* CONFIG_ACPI */
return 1;
}
......@@ -18,10 +18,12 @@
* Goutham Rao: <goutham.rao@intel.com>
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <asm/efi.h>
#include <asm/io.h>
......@@ -36,6 +38,17 @@ extern efi_status_t efi_call_phys (void *, ...);
struct efi efi;
static efi_runtime_services_t *runtime;
/*
* efi_dir is allocated here, but the directory isn't created
* here, as proc_mkdir() doesn't work this early in the bootup
* process. Therefore, each module, like efivars, must test for
* if (!efi_dir) efi_dir = proc_mkdir("efi", NULL);
* prior to creating their own entries under /proc/efi.
*/
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *efi_dir = NULL;
#endif
static unsigned long mem_limit = ~0UL;
static efi_status_t
......@@ -220,10 +233,8 @@ efi_map_pal_code (void)
/*
* The only ITLB entry in region 7 that is used is the one installed by
* __start(). That entry covers a 64MB range.
*
* XXX Fixme: should be dynamic here (for page size)
*/
mask = ~((1 << _PAGE_SIZE_64M) - 1);
mask = ~((1 << KERNEL_PG_SHIFT) - 1);
vaddr = PAGE_OFFSET + md->phys_addr;
/*
......@@ -246,14 +257,14 @@ efi_map_pal_code (void)
printk("CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
smp_processor_id(), md->phys_addr, md->phys_addr + (md->num_pages << 12),
vaddr & mask, (vaddr & mask) + 64*1024*1024);
vaddr & mask, (vaddr & mask) + KERNEL_PG_SIZE);
/*
* Cannot write to CRx with PSR.ic=1
*/
ia64_clear_ic(flags);
ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL)), _PAGE_SIZE_64M);
pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL)), KERNEL_PG_SHIFT);
local_irq_restore(flags);
ia64_srlz_i();
}
......@@ -441,3 +452,35 @@ efi_enter_virtual_mode (void)
efi.get_next_high_mono_count = __va(runtime->get_next_high_mono_count);
efi.reset_system = __va(runtime->reset_system);
}
/*
* Walk the EFI memory map looking for the I/O port range. There can only be one entry of
* this type, other I/O port ranges should be described via ACPI.
*/
u64
efi_get_iobase (void)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
u64 efi_desc_size;
efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
efi_desc_size = ia64_boot_param->efi_memdesc_size;
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
/* paranoia attribute checking */
if (md->attribute == (EFI_MEMORY_UC | EFI_MEMORY_RUNTIME))
return md->phys_addr;
}
}
return 0;
}
static void __exit
efivars_exit(void)
{
remove_proc_entry(efi_dir->name, NULL);
}
......@@ -6,8 +6,8 @@
* This code takes all variables accessible from EFI runtime and
* exports them via /proc
*
* Reads to /proc/efi/varname return an efi_variable_t structure.
* Writes to /proc/efi/varname must be an efi_variable_t structure.
* Reads to /proc/efi/vars/varname return an efi_variable_t structure.
* Writes to /proc/efi/vars/varname must be an efi_variable_t structure.
* Writes with DataSize = 0 or Attributes = 0 deletes the variable.
* Writes with a new value in VariableName+VendorGuid creates
* a new variable.
......@@ -29,6 +29,15 @@
*
* Changelog:
*
* 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
* Moved vars from /proc/efi to /proc/efi/vars, and made
* efi.c own the /proc/efi directory.
* v0.03 release to linux-ia64@linuxia64.org
*
* 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
* At the request of Stephane, moved ownership of /proc/efi
* to efi.c, and now efivars lives under /proc/efi/vars.
*
* 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
* Feedback received from Stephane Eranian incorporated.
* efivar_write() checks copy_from_user() return value.
......@@ -57,7 +66,7 @@
MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
MODULE_DESCRIPTION("/proc interface to EFI Variables");
#define EFIVARS_VERSION "0.02 2001-Mar-12"
#define EFIVARS_VERSION "0.03 2001-Apr-20"
static int
efivar_read(char *page, char **start, off_t off,
......@@ -92,7 +101,7 @@ typedef struct _efivar_entry_t {
spinlock_t efivars_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(efivar_list);
static struct proc_dir_entry *efi_dir = NULL;
static struct proc_dir_entry *efi_vars_dir = NULL;
#define efivar_entry(n) list_entry(n, efivar_entry_t, list)
......@@ -188,7 +197,7 @@ efivar_create_proc_entry(unsigned long variable_name_size,
/* Create the entry in proc */
new_efivar->entry = create_proc_entry(short_name, 0600, efi_dir);
new_efivar->entry = create_proc_entry(short_name, 0600, efi_vars_dir);
kfree(short_name); short_name = NULL;
if (!new_efivar->entry) return 1;
......@@ -286,7 +295,7 @@ efivar_write(struct file *file, const char *buffer,
/* Since the data ptr we've currently got is probably for
a different variable, find the right variable.
This allows any properly formatted data structure to
be written to any of the files in /proc/efi and it will work.
be written to any of the files in /proc/efi/vars and it will work.
*/
list_for_each(pos, &efivar_list) {
search_efivar = efivar_entry(pos);
......@@ -320,7 +329,7 @@ efivar_write(struct file *file, const char *buffer,
if (!var_data->DataSize || !var_data->Attributes) {
/* We just deleted the NVRAM variable */
remove_proc_entry(efivar->entry->name, efi_dir);
remove_proc_entry(efivar->entry->name, efi_vars_dir);
list_del(&efivar->list);
kfree(efivar);
}
......@@ -354,12 +363,22 @@ efivars_init(void)
printk(KERN_INFO "EFI Variables Facility v%s\n", EFIVARS_VERSION);
/* Since efi.c happens before procfs is available,
we create the directory here if it doesn't
already exist. There's probably a better way
to do this.
*/
if (!efi_dir)
efi_dir = proc_mkdir("efi", NULL);
efi_vars_dir = proc_mkdir("vars", efi_dir);
/* Per EFI spec, the maximum storage allocated for both
the variable name and variable data is 1024 bytes.
*/
efi_dir = proc_mkdir("efi", NULL);
memset(variable_name, 0, 1024);
do {
......@@ -401,11 +420,11 @@ efivars_exit(void)
list_for_each(pos, &efivar_list) {
efivar = efivar_entry(pos);
remove_proc_entry(efivar->entry->name, efi_dir);
remove_proc_entry(efivar->entry->name, efi_vars_dir);
list_del(&efivar->list);
kfree(efivar);
}
remove_proc_entry(efi_dir->name, NULL);
remove_proc_entry(efi_vars_dir->name, efi_dir);
spin_unlock(&efivars_lock);
}
......
......@@ -140,8 +140,8 @@ GLOBAL_ENTRY(ia64_switch_to)
dep r20=0,in0,61,3 // physical address of "current"
;;
st8 [r22]=sp // save kernel stack pointer of old task
shr.u r26=r20,_PAGE_SIZE_64M
mov r16=1
shr.u r26=r20,KERNEL_PG_SHIFT
mov r16=KERNEL_PG_NUM
;;
cmp.ne p6,p7=r26,r16 // check >= 64M && < 128M
adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
......@@ -175,7 +175,7 @@ GLOBAL_ENTRY(ia64_switch_to)
;;
srlz.d
or r23=r25,r20 // construct PA | page properties
mov r25=_PAGE_SIZE_64M<<2
mov r25=KERNEL_PG_SHIFT<<2
;;
mov cr.itir=r25
mov cr.ifa=in0 // VA of next task...
......@@ -212,23 +212,20 @@ GLOBAL_ENTRY(save_switch_stack)
.save @priunat,r17
mov r17=ar.unat // preserve caller's
.body
#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
#if !(defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
adds r3=80,sp
;;
lfetch.fault.excl.nt1 [r3],128
#endif
mov ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
#if !(defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
adds r2=16+128,sp
;;
lfetch.fault.excl.nt1 [r2],128
lfetch.fault.excl.nt1 [r3],128
#endif
adds r14=SW(R4)+16,sp
#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
#if !(defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
;;
lfetch.fault.excl [r2]
lfetch.fault.excl [r3]
......@@ -325,8 +322,7 @@ ENTRY(load_switch_stack)
.prologue
.altrp b7
.body
#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
#if !(defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
lfetch.fault.nt1 [sp]
#endif
......@@ -496,15 +492,13 @@ END(ia64_trace_syscall)
GLOBAL_ENTRY(ia64_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
#ifdef CONFIG_SMP
/*
* In SMP mode, we need to call invoke_schedule_tail to complete the scheduling process.
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task.
*/
br.call.sptk.few rp=invoke_schedule_tail
.ret8:
#endif
adds r2=IA64_TASK_PTRACE_OFFSET,r13
;;
ld8 r2=[r2]
......@@ -530,14 +524,9 @@ END(ia64_ret_from_syscall)
// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
PT_REGS_UNWIND_INFO(0)
cmp.eq p16,p0=r0,r0 // set the "first_time" flag
movl r15=PERCPU_ADDR+IA64_CPU_SOFTIRQ_ACTIVE_OFFSET // r15 = &cpu_data.softirq.active
;;
ld8 r2=[r15]
lfetch.fault [sp]
movl r14=.restart
;;
lfetch.fault [sp]
shr.u r3=r2,32 // r3 = cpu_data.softirq.mask
MOVBR(.ret.sptk,rp,r14,.restart)
.restart:
adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13
......@@ -546,37 +535,28 @@ GLOBAL_ENTRY(ia64_leave_kernel)
adds r19=IA64_TASK_PFM_NOTIFY_OFFSET,r13
#endif
;;
ld8 r17=[r17] // load current->need_resched
ld4 r18=[r18] // load current->sigpending
(p16) and r2=r2,r3 // r2 <- (softirq.active & softirq.mask)
;;
#ifdef CONFIG_PERFMON
ld8 r19=[r19] // load current->task.pfm_notify
#endif
(p16) cmp4.ne.unc p6,p0=r2,r0 // p6 <- (softirq.active & softirq.mask) != 0
(pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
ld8 r17=[r17] // load current->need_resched
ld4 r18=[r18] // load current->sigpending
;;
(pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
#ifdef CONFIG_PERFMON
cmp.ne p9,p0=r19,r0 // current->task.pfm_notify != 0?
#endif
cmp.ne p16,p0=r0,r0 // clear the "first_time" flag
(pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
(pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
;;
# if __GNUC__ < 3
(p6) br.call.spnt.many b7=invoke_do_softirq
# else
(p6) br.call.spnt.many b7=do_softirq
# endif
adds r2=PT(R8)+16,r12
adds r3=PT(R9)+16,r12
#ifdef CONFIG_PERFMON
(p9) br.call.spnt.many b7=pfm_overflow_notify
#endif
# if __GNUC__ < 3
#if __GNUC__ < 3
(p7) br.call.spnt.many b7=invoke_schedule
#else
(p7) br.call.spnt.many b7=schedule
#endif
adds r2=PT(R8)+16,r12
adds r3=PT(R9)+16,r12
(p8) br.call.spnt.many b7=handle_signal_delivery // check & deliver pending signals
;;
// start restoring the state saved on the kernel stack (struct pt_regs):
......@@ -634,14 +614,6 @@ GLOBAL_ENTRY(ia64_leave_kernel)
;;
bsw.0 // switch back to bank 0
;;
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
nop.i 0x0
;;
nop.i 0x0
;;
nop.i 0x0
;;
#endif
adds r16=16,r12
adds r17=24,r12
;;
......@@ -792,7 +764,6 @@ ENTRY(handle_syscall_error)
br.cond.sptk.many ia64_leave_kernel
END(handle_syscall_error)
# ifdef CONFIG_SMP
/*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted.
......@@ -809,29 +780,7 @@ ENTRY(invoke_schedule_tail)
br.ret.sptk.many rp
END(invoke_schedule_tail)
# endif /* CONFIG_SMP */
#if __GNUC__ < 3
/*
* Invoke do_softirq() while preserving in0-in7, which may be needed
* in case a system call gets restarted. Note that declaring do_softirq()
* with asmlinkage() is NOT enough because that will only preserve as many
* registers as there are formal arguments.
*
* XXX fix me: with gcc 3.0, we won't need this anymore because syscall_linkage
* renders all eight input registers (in0-in7) as "untouchable".
*/
ENTRY(invoke_do_softirq)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
alloc loc1=ar.pfs,8,2,0,0
mov loc0=rp
;;
.body
br.call.sptk.few rp=do_softirq
.ret13: mov ar.pfs=loc1
mov rp=loc0
br.ret.sptk.many rp
END(invoke_do_softirq)
/*
* Invoke schedule() while preserving in0-in7, which may be needed
......@@ -1187,7 +1136,7 @@ sys_call_table:
data8 sys_newfstat
data8 sys_clone2
data8 sys_getdents64
data8 ia64_ni_syscall // 1215
data8 sys_getunwind // 1215
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall
......
#include <linux/config.h>
/* XXX fixme */
#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC)
#if defined(CONFIG_ITANIUM_B1_SPECIFIC)
# define MOVBR(type,br,gr,lbl) mov br=gr
#else
# define MOVBR(type,br,gr,lbl) mov##type br=gr,lbl
......
......@@ -20,7 +20,7 @@
#define MB (1024*1024UL)
#define NUM_MEM_DESCS 2
#define NUM_MEM_DESCS 3
static char fw_mem[( sizeof(struct ia64_boot_param)
+ sizeof(efi_system_table_t)
......@@ -121,68 +121,63 @@ offtime (unsigned long t, efi_time_t *tp)
*/
extern void pal_emulator_static (void);
asm ("
.proc pal_emulator_static
pal_emulator_static:
mov r8=-1
mov r9=256
;;
cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
(p6) br.cond.sptk.few static
;;
mov r9=512
;;
cmp.gtu p6,p7=r9,r28
(p6) br.cond.sptk.few stacked
;;
static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
(p7) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
movl r9=0x100000000 /* tc.base */
movl r10=0x0000000200000003 /* count[0], count[1] */
movl r11=0x1000000000002000 /* stride[0], stride[1] */
br.cond.sptk.few rp
1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x100000064 /* proc_ratio (1/100) */
movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
movl r11=0x100000064 /* itc_ratio<<32 (1/100) */
;;
1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */
(p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
mov r9=96 /* num phys stacked */
mov r10=0 /* hints */
mov r11=0
br.cond.sptk.few rp
1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */
(p7) br.cond.sptk.few 1f
mov r9=ar.lc
movl r8=524288 /* flush 512k million cache lines (16MB) */
;;
mov ar.lc=r8
movl r8=0xe000000000000000
;;
.loop: fc r8
add r8=32,r8
br.cloop.sptk.few .loop
sync.i
;;
srlz.i
;;
mov ar.lc=r9
mov r8=r0
1: br.cond.sptk.few rp
stacked:
br.ret.sptk.few rp
.endp pal_emulator_static\n");
asm (
" .proc pal_emulator_static\n"
"pal_emulator_static:"
" mov r8=-1\n"
" mov r9=256\n"
" ;;\n"
" cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */\n"
"(p6) br.cond.sptk.few static\n"
" ;;\n"
" mov r9=512\n"
" ;;\n"
" cmp.gtu p6,p7=r9,r28\n"
"(p6) br.cond.sptk.few stacked\n"
" ;;\n"
"static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */\n"
"(p7) br.cond.sptk.few 1f\n"
" ;;\n"
" mov r8=0 /* status = 0 */\n"
" movl r9=0x100000000 /* tc.base */\n"
" movl r10=0x0000000200000003 /* count[0], count[1] */\n"
" movl r11=0x1000000000002000 /* stride[0], stride[1] */\n"
" br.cond.sptk.few rp\n"
"1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r8=0 /* status = 0 */\n"
" movl r9 =0x100000064 /* proc_ratio (1/100) */\n"
" movl r10=0x100000100 /* bus_ratio<<32 (1/256) */\n"
" movl r11=0x100000064 /* itc_ratio<<32 (1/100) */\n"
" ;;\n"
"1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r8=0 /* status = 0 */\n"
" mov r9=96 /* num phys stacked */\n"
" mov r10=0 /* hints */\n"
" mov r11=0\n"
" br.cond.sptk.few rp\n"
"1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */\n"
"(p7) br.cond.sptk.few 1f\n"
" mov r9=ar.lc\n"
" movl r8=524288 /* flush 512k million cache lines (16MB) */\n"
" ;;\n"
" mov ar.lc=r8\n"
" movl r8=0xe000000000000000\n"
" ;;\n"
".loop: fc r8\n"
" add r8=32,r8\n"
" br.cloop.sptk.few .loop\n"
" sync.i\n"
" ;;\n"
" srlz.i\n"
" ;;\n"
" mov ar.lc=r9\n"
" mov r8=r0\n"
"1: br.cond.sptk.few rp\n"
"stacked:\n"
" br.ret.sptk.few rp\n"
" .endp pal_emulator_static\n");
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
......@@ -437,8 +432,17 @@ sys_fw_init (const char *args, int arglen)
sal_systab->checksum = -checksum;
/* fill in a memory descriptor: */
/* simulate free memory at physical address zero */
md = &efi_memmap[0];
md->type = EFI_BOOT_SERVICES_DATA;
md->pad = 0;
md->phys_addr = 0*MB;
md->virt_addr = 0;
md->num_pages = (1*MB) >> 12; /* 1MB (in 4KB pages) */
md->attribute = EFI_MEMORY_WB;
/* fill in a memory descriptor: */
md = &efi_memmap[1];
md->type = EFI_CONVENTIONAL_MEMORY;
md->pad = 0;
md->phys_addr = 2*MB;
......@@ -447,7 +451,7 @@ sys_fw_init (const char *args, int arglen)
md->attribute = EFI_MEMORY_WB;
/* descriptor for firmware emulator: */
md = &efi_memmap[1];
md = &efi_memmap[2];
md->type = EFI_PAL_CODE;
md->pad = 0;
md->phys_addr = 1*MB;
......@@ -462,7 +466,7 @@ sys_fw_init (const char *args, int arglen)
*/
/* descriptor for high memory (>4GB): */
md = &efi_memmap[2];
md = &efi_memmap[3];
md->type = EFI_CONVENTIONAL_MEMORY;
md->pad = 0;
md->phys_addr = 4096*MB;
......
......@@ -15,15 +15,23 @@
.section .text.gate,"ax"
.align PAGE_SIZE
# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET)
# define RBS_BASE_OFF (16 + IA64_SIGFRAME_RBS_BASE_OFFSET)
# define SIGHANDLER_OFF (16 + IA64_SIGFRAME_HANDLER_OFFSET)
# define SIGCONTEXT_OFF (16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET)
# define SIGINFO_OFF 16
# define SIGCONTEXT_OFF (SIGINFO_OFF + ((IA64_SIGINFO_SIZE + 15) & ~15))
# define FLAGS_OFF IA64_SIGCONTEXT_FLAGS_OFFSET
# define CFM_OFF IA64_SIGCONTEXT_CFM_OFFSET
# define FR6_OFF IA64_SIGCONTEXT_FR6_OFFSET
# define BSP_OFF IA64_SIGCONTEXT_AR_BSP_OFFSET
# define RNAT_OFF IA64_SIGCONTEXT_AR_RNAT_OFFSET
# define UNAT_OFF IA64_SIGCONTEXT_AR_UNAT_OFFSET
# define FPSR_OFF IA64_SIGCONTEXT_AR_FPSR_OFFSET
# define PR_OFF IA64_SIGCONTEXT_PR_OFFSET
# define RP_OFF IA64_SIGCONTEXT_B0_OFFSET
# define SP_OFF IA64_SIGCONTEXT_R12_OFFSET
# define base0 r2
# define base1 r3
/*
......@@ -31,17 +39,9 @@
*
* +===============================+
* | |
* // struct sigcontext //
* // struct sigframe //
* | |
* +===============================+ <-- sp+SIGCONTEXT_OFF
* | |
* // rest of siginfo //
* | |
* + +---------------+
* | | siginfo.code |
* +---------------+---------------+
* | siginfo.errno | siginfo.signo |
* +-------------------------------+ <-- sp+SIGINFO_OFF
* +-------------------------------+ <-- sp+16
* | 16 byte of scratch |
* | space |
* +-------------------------------+ <-- sp
......@@ -51,46 +51,60 @@
* incoming general register may be a NaT value (including sp, in which case the
* process ends up dying with a SIGSEGV).
*
* The first need to do is a cover to get the registers onto the backing store.
* Once that is done, we invoke the signal handler which may modify some of the
* machine state. After returning from the signal handler, we return control to
* the previous context by executing a sigreturn system call. A signal handler
* may call the rt_sigreturn() function to directly return to a given sigcontext.
* However, the user-level sigreturn() needs to do much more than calling the
* rt_sigreturn() system call as it needs to unwind the stack to restore preserved
* registers that may have been saved on the signal handler's call stack.
*
* On entry:
* r2 = signal number
* r3 = plabel of signal handler
* r15 = new register backing store
* [sp+16] = sigframe
 * The first thing we need to do is a cover to get the registers onto the backing
* store. Once that is done, we invoke the signal handler which may modify some
* of the machine state. After returning from the signal handler, we return
* control to the previous context by executing a sigreturn system call. A signal
* handler may call the rt_sigreturn() function to directly return to a given
* sigcontext. However, the user-level sigreturn() needs to do much more than
* calling the rt_sigreturn() system call as it needs to unwind the stack to
* restore preserved registers that may have been saved on the signal handler's
* call stack.
*/
GLOBAL_ENTRY(ia64_sigtramp)
ld8 r10=[r3],8 // get signal handler entry point
br.call.sptk.many rp=invoke_sighandler
END(ia64_sigtramp)
// describe the state that is active when we get here:
.prologue
.unwabi @svr4, 's' // mark this as a sigtramp handler (saves scratch regs)
.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF
.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF
.savesp pr, PR_OFF+SIGCONTEXT_OFF
.savesp rp, RP_OFF+SIGCONTEXT_OFF
.vframesp SP_OFF+SIGCONTEXT_OFF
.body
ENTRY(invoke_sighandler)
ld8 gp=[r3] // get signal handler's global pointer
mov b6=r10
.prologue
adds base0=SIGHANDLER_OFF,sp
adds base1=RBS_BASE_OFF,sp
br.call.sptk.many rp=1f
1:
ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF) // get pointer to signal handler's plabel
ld8 r15=[base1],(ARG1_OFF-RBS_BASE_OFF) // get address of new RBS base (or NULL)
cover // push args in interrupted frame onto backing store
;;
.save ar.pfs, r8
alloc r8=ar.pfs,0,0,3,0 // get CFM0, EC0, and CPL0 into r8
ld8 out0=[base0],16 // load arg0 (signum)
;;
ld8 out1=[base1] // load arg1 (siginfop)
ld8 r10=[r17],8 // get signal handler entry point
;;
mov r17=ar.bsp // fetch ar.bsp
ld8 out2=[base0] // load arg2 (sigcontextp)
ld8 gp=[r17] // get signal handler's global pointer
cmp.ne p8,p0=r15,r0 // do we need to switch the rbs?
mov out0=r2 // signal number
mov.m r17=ar.bsp // fetch ar.bsp
.spillsp.p p8, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
(p8) br.cond.spnt.few setup_rbs // yup -> (clobbers r14 and r16)
back_from_setup_rbs:
adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
;;
.spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
st8 [base0]=r17,(CFM_OFF-BSP_OFF) // save sc_ar_bsp
dep r8=0,r8,38,26 // clear EC0, CPL0 and reserved bits
dep r8=0,r8,38,26 // clear EC0, CPL0 and reserved bits
adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
;;
.spillsp ar.pfs, CFM_OFF
st8 [base0]=r8 // save CFM0
adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
;;
......@@ -99,14 +113,13 @@ back_from_setup_rbs:
;;
stf.spill [base0]=f8,32
stf.spill [base1]=f9,32
mov b6=r10
;;
stf.spill [base0]=f10,32
stf.spill [base1]=f11,32
adds out1=SIGINFO_OFF,sp // siginfo pointer
;;
stf.spill [base0]=f12,32
stf.spill [base1]=f13,32
adds out2=SIGCONTEXT_OFF,sp // sigcontext pointer
;;
stf.spill [base0]=f14,32
stf.spill [base1]=f15,32
......@@ -140,9 +153,8 @@ back_from_restore_rbs:
ldf.fill f15=[base1],32
mov r15=__NR_rt_sigreturn
break __BREAK_SYSCALL
END(invoke_sighandler)
ENTRY(setup_rbs)
setup_rbs:
flushrs // must be first in insn
mov ar.rsc=0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
......@@ -150,13 +162,13 @@ ENTRY(setup_rbs)
mov r14=ar.rnat // get rnat as updated by flushrs
mov ar.bspstore=r15 // set new register backing store area
;;
.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
st8 [r16]=r14 // save sc_ar_rnat
mov ar.rsc=0xf // set RSE into eager mode, pl 3
invala // invalidate ALAT
br.cond.sptk.many back_from_setup_rbs
END(setup_rbs)
ENTRY(restore_rbs)
restore_rbs:
flushrs
mov ar.rsc=0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
......@@ -168,4 +180,4 @@ ENTRY(restore_rbs)
mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
// invala not necessary as that will happen when returning to user-mode
br.cond.sptk.many back_from_restore_rbs
END(restore_rbs)
END(ia64_sigtramp)
......@@ -63,17 +63,17 @@ start_ap:
* that maps the kernel's text and data:
*/
rsm psr.i | psr.ic
mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (_PAGE_SIZE_64M << 2))
mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (KERNEL_PG_SHIFT << 2))
;;
srlz.i
mov r18=_PAGE_SIZE_64M<<2
movl r17=PAGE_OFFSET + 64*1024*1024
mov r18=KERNEL_PG_SHIFT<<2
movl r17=PAGE_OFFSET + KERNEL_PG_NUM*KERNEL_PG_SIZE
;;
mov rr[r17]=r16
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
movl r18=(64*1024*1024 | PAGE_KERNEL)
movl r18=(KERNEL_PG_NUM*KERNEL_PG_SIZE | PAGE_KERNEL)
;;
srlz.i
;;
......@@ -111,7 +111,7 @@ start_ap:
;;
#ifdef CONFIG_IA64_EARLY_PRINTK
mov r3=(6<<8) | (_PAGE_SIZE_64M<<2)
mov r3=(6<<8) | (KERNEL_PG_SHIFT<<2)
movl r2=6<<61
;;
mov rr[r2]=r3
......@@ -123,11 +123,12 @@ start_ap:
#define isAP p2 // are we an Application Processor?
#define isBP p3 // are we the Bootstrap Processor?
#ifdef CONFIG_SMP
/*
* Find the init_task for the currently booting CPU. At poweron, and in
* UP mode, cpu_now_booting is 0.
* UP mode, cpucount is 0.
*/
movl r3=cpu_now_booting
movl r3=cpucount
;;
ld4 r3=[r3] // r3 <- smp_processor_id()
movl r2=init_tasks
......@@ -135,6 +136,11 @@ start_ap:
shladd r2=r3,3,r2
;;
ld8 r2=[r2]
#else
mov r3=0
movl r2=init_task_union
;;
#endif
cmp4.ne isAP,isBP=r3,r0
;; // RAW on r2
extr r3=r2,0,61 // r3 == phys addr of task struct
......@@ -182,7 +188,7 @@ alive_msg:
#endif /* CONFIG_IA64_EARLY_PRINTK */
#ifdef CONFIG_SMP
(isAP) br.call.sptk.few rp=smp_callin
(isAP) br.call.sptk.few rp=start_secondary
.ret0:
(isAP) br.cond.sptk.few self
#endif
......@@ -212,8 +218,7 @@ GLOBAL_ENTRY(ia64_save_debug_regs)
add r19=IA64_NUM_DBG_REGS*8,in0
;;
1: mov r16=dbr[r18]
#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_C0_SPECIFIC)
#if defined(CONFIG_ITANIUM_C0_SPECIFIC)
;;
srlz.d
#endif
......@@ -230,8 +235,7 @@ END(ia64_save_debug_regs)
GLOBAL_ENTRY(ia64_load_debug_regs)
alloc r16=ar.pfs,1,0,0,0
#if !(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
#if !(defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC))
lfetch.nta [in0]
#endif
mov r20=ar.lc // preserve ar.lc
......@@ -244,8 +248,7 @@ GLOBAL_ENTRY(ia64_load_debug_regs)
add r18=1,r18
;;
mov dbr[r18]=r16
#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC) \
|| defined(CONFIG_ITANIUM_C0_SPECIFIC)
#if defined(CONFIG_ITANIUM_BSTEP_SPECIFIC) || defined(CONFIG_ITANIUM_C0_SPECIFIC)
;;
srlz.d
#endif
......
......@@ -7,6 +7,7 @@
#include <linux/string.h>
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL(memmove);
......@@ -30,6 +31,9 @@ EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
#include <linux/interrupt.h>
EXPORT_SYMBOL(probe_irq_mask);
#include <linux/in6.h>
#include <asm/checksum.h>
/* not coded yet?? EXPORT_SYMBOL(csum_ipv6_magic); */
......@@ -48,15 +52,14 @@ EXPORT_SYMBOL_NOVERS(__down);
EXPORT_SYMBOL_NOVERS(__down_interruptible);
EXPORT_SYMBOL_NOVERS(__down_trylock);
EXPORT_SYMBOL_NOVERS(__up);
EXPORT_SYMBOL_NOVERS(__down_read_failed);
EXPORT_SYMBOL_NOVERS(__down_write_failed);
EXPORT_SYMBOL_NOVERS(__rwsem_wake);
#include <asm/page.h>
EXPORT_SYMBOL(clear_page);
#include <asm/processor.h>
EXPORT_SYMBOL(cpu_data);
# ifndef CONFIG_NUMA
EXPORT_SYMBOL(_cpu_data);
# endif
EXPORT_SYMBOL(kernel_thread);
#include <asm/system.h>
......@@ -78,6 +81,7 @@ EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_single);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(ia64_cpu_to_sapicid);
#include <linux/smp.h>
EXPORT_SYMBOL(smp_num_cpus);
......@@ -137,3 +141,8 @@ EXPORT_SYMBOL(ia64_pal_call_static);
extern struct efi efi;
EXPORT_SYMBOL(efi);
#include <linux/proc_fs.h>
extern struct proc_dir_entry *efi_dir;
EXPORT_SYMBOL(efi_dir);
......@@ -20,7 +20,7 @@
* Here is what the interrupt logic between a PCI device and the CPU looks like:
*
* (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD). The
* device is uniquely identified by its bus-, device-, and slot-number (the function
 * device is uniquely identified by its bus- and slot-number (the function
* number does not matter here because all functions share the same interrupt
* lines).
*
......@@ -51,6 +51,7 @@
#include <linux/irq.h>
#include <asm/acpi-ext.h>
#include <asm/acpikcfg.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/iosapic.h>
......@@ -59,9 +60,6 @@
#include <asm/ptrace.h>
#include <asm/system.h>
#ifdef CONFIG_ACPI_KERNEL_CONFIG
# include <asm/acpikcfg.h>
#endif
#undef DEBUG_IRQ_ROUTING
......@@ -207,7 +205,45 @@ unmask_irq (unsigned int irq)
static void
iosapic_set_affinity (unsigned int irq, unsigned long mask)
{
printk("iosapic_set_affinity: not implemented yet\n");
#ifdef CONFIG_SMP
unsigned long flags;
u32 high32, low32;
int dest, pin;
char *addr;
mask &= (1UL << smp_num_cpus) - 1;
if (!mask || irq >= IA64_NUM_VECTORS)
return;
dest = cpu_physical_id(ffz(~mask));
pin = iosapic_irq[irq].pin;
addr = iosapic_irq[irq].addr;
if (pin < 0)
return; /* not an IOSAPIC interrupt */
/* dest contains both id and eid */
high32 = dest << IOSAPIC_DEST_SHIFT;
spin_lock_irqsave(&iosapic_lock, flags);
{
/* get current delivery mode by reading the low32 */
writel(IOSAPIC_RTE_LOW(pin), addr + IOSAPIC_REG_SELECT);
low32 = readl(addr + IOSAPIC_WINDOW);
/* change delivery mode to fixed */
low32 &= ~(7 << IOSAPIC_DELIVERY_SHIFT);
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
writel(IOSAPIC_RTE_HIGH(pin), addr + IOSAPIC_REG_SELECT);
writel(high32, addr + IOSAPIC_WINDOW);
writel(IOSAPIC_RTE_LOW(pin), addr + IOSAPIC_REG_SELECT);
writel(low32, addr + IOSAPIC_WINDOW);
}
spin_unlock_irqrestore(&iosapic_lock, flags);
#endif
}
/*
......@@ -330,7 +366,7 @@ iosapic_register_legacy_irq (unsigned long irq,
}
void __init
iosapic_init (unsigned long phys_addr, unsigned int base_irq)
iosapic_init (unsigned long phys_addr, unsigned int base_irq, int pcat_compat)
{
struct hw_interrupt_type *irq_type;
int i, irq, max_pin, vector;
......@@ -348,13 +384,7 @@ iosapic_init (unsigned long phys_addr, unsigned int base_irq)
/*
* Fetch the PCI interrupt routing table:
*/
#ifdef CONFIG_ACPI_KERNEL_CONFIG
acpi_cf_get_pci_vectors(&pci_irq.route, &pci_irq.num_routes);
#else
pci_irq.route =
(struct pci_vector_struct *) __va(ia64_boot_param->pci_vectors);
pci_irq.num_routes = ia64_boot_param->num_pci_vectors;
#endif
}
addr = ioremap(phys_addr, 0);
......@@ -365,7 +395,7 @@ iosapic_init (unsigned long phys_addr, unsigned int base_irq)
printk("IOSAPIC: version %x.%x, address 0x%lx, IRQs 0x%02x-0x%02x\n",
(ver & 0xf0) >> 4, (ver & 0x0f), phys_addr, base_irq, base_irq + max_pin);
if (base_irq == 0)
if ((base_irq == 0) && pcat_compat)
/*
* Map the legacy ISA devices into the IOSAPIC data. Some of these may
* get reprogrammed later on with data from the ACPI Interrupt Source
......
......@@ -626,6 +626,8 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
desc->handler->end(irq);
spin_unlock(&desc->lock);
}
if (local_softirq_pending())
do_softirq();
return 1;
}
......
......@@ -72,6 +72,11 @@ void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
unsigned long saved_tpr;
#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
#else
# define IS_RESCHEDULE(vec) (0)
#endif
#if IRQ_DEBUG
{
......@@ -110,24 +115,25 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
*/
saved_tpr = ia64_get_tpr();
ia64_srlz_d();
do {
ia64_set_tpr(vector);
ia64_srlz_d();
do_IRQ(local_vector_to_irq(vector), regs);
/*
* Disable interrupts and send EOI:
*/
local_irq_disable();
ia64_set_tpr(saved_tpr);
while (vector != IA64_SPURIOUS_INT_VECTOR) {
if (!IS_RESCHEDULE(vector)) {
ia64_set_tpr(vector);
ia64_srlz_d();
do_IRQ(local_vector_to_irq(vector), regs);
/*
* Disable interrupts and send EOI:
*/
local_irq_disable();
ia64_set_tpr(saved_tpr);
}
ia64_eoi();
vector = ia64_get_ivr();
} while (vector != IA64_SPURIOUS_INT_VECTOR);
}
}
#ifdef CONFIG_SMP
extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
static struct irqaction ipi_irqaction = {
......@@ -147,7 +153,7 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
if (irq_to_vector(irq) == vec) {
desc = irq_desc(irq);
desc->status |= IRQ_PER_CPU;
desc->handler = &irq_type_ia64_sapic;
desc->handler = &irq_type_ia64_lsapic;
if (action)
setup_irq(irq, action);
}
......
/*
* SAPIC Interrupt Controller
* LSAPIC Interrupt Controller
*
* This takes care of interrupts that are generated by the CPU's
* internal Streamlined Advanced Programmable Interrupt Controller
* (SAPIC), such as the ITC and IPI interrupts.
*
* (LSAPIC), such as the ITC and IPI interrupts.
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000 Hewlett-Packard Co
......@@ -15,24 +15,24 @@
#include <linux/irq.h>
static unsigned int
sapic_noop_startup (unsigned int irq)
lsapic_noop_startup (unsigned int irq)
{
return 0;
}
static void
sapic_noop (unsigned int irq)
lsapic_noop (unsigned int irq)
{
/* nothing to do... */
}
struct hw_interrupt_type irq_type_ia64_sapic = {
typename: "SAPIC",
startup: sapic_noop_startup,
shutdown: sapic_noop,
enable: sapic_noop,
disable: sapic_noop,
ack: sapic_noop,
end: sapic_noop,
set_affinity: (void (*)(unsigned int, unsigned long)) sapic_noop
struct hw_interrupt_type irq_type_ia64_lsapic = {
typename: "LSAPIC",
startup: lsapic_noop_startup,
shutdown: lsapic_noop,
enable: lsapic_noop,
disable: lsapic_noop,
ack: lsapic_noop,
end: lsapic_noop,
set_affinity: (void (*)(unsigned int, unsigned long)) lsapic_noop
};
......@@ -9,16 +9,14 @@
* 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
*/
/*
* This file defines the interrupt vector table used by the CPU.
* This file defines the interruption vector table used by the CPU.
* It does not include one entry per possible cause of interruption.
*
* External interrupts only use 1 entry. All others are internal interrupts
*
* The first 20 entries of the table contain 64 bundles each while the
* remaining 48 entries contain only 16 bundles each.
*
* The 64 bundles are used to allow inlining the whole handler for critical
* interrupts like TLB misses.
* interruptions like TLB misses.
*
* For each entry, the comment is as follows:
*
......@@ -27,7 +25,7 @@
* entry number ---------/ / / /
* size of the entry -------------/ / /
* vector name -------------------------------------/ /
* related interrupts (what is the real interrupt?) ----------/
* interruptions triggering this vector ----------------------/
*
* The table is 32KB in size and must be aligned on 32KB boundary.
* (The CPU ignores the 15 lower bits of the address)
......@@ -363,7 +361,7 @@ ENTRY(page_fault)
;;
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
movl r14=ia64_leave_kernel
......@@ -536,8 +534,7 @@ ENTRY(iaccess_bit)
;;
1: ld8 r18=[r17]
;;
# if defined(CONFIG_IA32_SUPPORT) && \
(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_B0_SPECIFIC))
# if defined(CONFIG_IA32_SUPPORT) && defined(CONFIG_ITANIUM_B0_SPECIFIC)
/*
* Erratum 85 (Access bit fault could be reported before page not present fault)
* If the PTE indicates the page is not present, then just turn this into a
......@@ -567,8 +564,7 @@ ENTRY(iaccess_bit)
;;
1: ld8 r18=[r17]
;;
# if defined(CONFIG_IA32_SUPPORT) && \
(defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_B0_SPECIFIC))
# if defined(CONFIG_IA32_SUPPORT) && defined(CONFIG_ITANIUM_B0_SPECIFIC)
/*
* Erratum 85 (Access bit fault could be reported before page not present fault)
* If the PTE indicates the page is not present, then just turn this into a
......@@ -650,7 +646,7 @@ ENTRY(break_fault)
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
;;
(p15) ssm psr.i // restore psr.i
......@@ -702,7 +698,7 @@ ENTRY(break_fault)
st8 [r16]=r18 // store new value for cr.isr
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.call.sptk.many rp=ia64_trace_syscall // rp will be overwritten (ignored)
br.cond.sptk.many ia64_trace_syscall
// NOT REACHED
END(break_fault)
......@@ -724,11 +720,14 @@ ENTRY(demine_args)
tnat.nz p15,p0=in7
(p11) mov in3=-1
tnat.nz p8,p0=r15 // demining r15 is not a must, but it is safer
(p12) mov in4=-1
(p13) mov in5=-1
;;
(p14) mov in6=-1
(p15) mov in7=-1
(p8) mov r15=-1
br.ret.sptk.many rp
END(demine_args)
......@@ -790,7 +789,7 @@ ENTRY(dispatch_illegal_op_fault)
SAVE_MIN_WITH_COVER
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer for SAVE_REST
......@@ -839,7 +838,7 @@ ENTRY(dispatch_to_ia32_handler)
mov r14=cr.isr
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i
adds r3=8,r2 // Base pointer for SAVE_REST
......@@ -890,8 +889,7 @@ ENTRY(dispatch_to_ia32_handler)
;;
mov rp=r15
(p8) br.call.sptk.many b6=b6
;;
br.call.sptk.many rp=ia32_trace_syscall // rp will be overwritten (ignored)
br.cond.sptk.many ia32_trace_syscall
non_ia32_syscall:
alloc r15=ar.pfs,0,0,2,0
......@@ -928,7 +926,7 @@ ENTRY(non_syscall)
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
movl r15=ia64_leave_kernel
......@@ -961,7 +959,7 @@ ENTRY(dispatch_unaligned_handler)
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer
......@@ -1003,7 +1001,7 @@ ENTRY(dispatch_to_fault_handler)
;;
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interrupt collection is enabled
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer for SAVE_REST
......
......@@ -27,6 +27,7 @@
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/machvec.h>
typedef struct ia64_fptr {
......
......@@ -672,7 +672,7 @@ ia64_monarch_init_handler:
//
mov r17=cr.lid
// XXX fix me: this is wrong: hard_smp_processor_id() is a pair of lid/eid
movl r18=__cpu_physical_id
movl r18=ia64_cpu_to_sapicid
;;
dep r18=0,r18,61,3 // convert to physical address
;;
......
......@@ -235,12 +235,6 @@
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
# define STOPS nop.i 0x0;; nop.i 0x0;; nop.i 0x0;;
#else
# define STOPS
#endif
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,) STOPS
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19) STOPS
#define SAVE_MIN DO_SAVE_MIN( , mov rCRIFS=r0, ) STOPS
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
#define SAVE_MIN DO_SAVE_MIN( , mov rCRIFS=r0, )
/*
* pci.c - Low-Level PCI Access in IA-64
*
*
* Derived from bios32.c of i386 tree.
*/
#include <linux/config.h>
......@@ -53,7 +53,7 @@ struct pci_fixup pcibios_fixups[] = {
#define PCI_CONFIG_ADDRESS(dev, where) \
(((u64) dev->bus->number << 16) | ((u64) (dev->devfn & 0xff) << 8) | (where & 0xff))
static int
static int
pci_conf_read_config_byte(struct pci_dev *dev, int where, u8 *value)
{
s64 status;
......@@ -64,7 +64,7 @@ pci_conf_read_config_byte(struct pci_dev *dev, int where, u8 *value)
return status;
}
static int
static int
pci_conf_read_config_word(struct pci_dev *dev, int where, u16 *value)
{
s64 status;
......@@ -75,7 +75,7 @@ pci_conf_read_config_word(struct pci_dev *dev, int where, u16 *value)
return status;
}
static int
static int
pci_conf_read_config_dword(struct pci_dev *dev, int where, u32 *value)
{
s64 status;
......@@ -86,19 +86,19 @@ pci_conf_read_config_dword(struct pci_dev *dev, int where, u32 *value)
return status;
}
static int
static int
pci_conf_write_config_byte (struct pci_dev *dev, int where, u8 value)
{
return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 1, value);
}
static int
static int
pci_conf_write_config_word (struct pci_dev *dev, int where, u16 value)
{
return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 2, value);
}
static int
static int
pci_conf_write_config_dword (struct pci_dev *dev, int where, u32 value)
{
return ia64_sal_pci_config_write(PCI_CONFIG_ADDRESS(dev, where), 4, value);
......@@ -116,7 +116,7 @@ struct pci_ops pci_conf = {
/*
* Initialization. Uses the SAL interface
*/
void __init
void __init
pcibios_init (void)
{
# define PCI_BUSES_TO_SCAN 255
......@@ -125,7 +125,7 @@ pcibios_init (void)
platform_pci_fixup(0); /* phase 0 initialization (before PCI bus has been scanned) */
printk("PCI: Probing PCI hardware\n");
for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
pci_scan_bus(i, &pci_conf, NULL);
platform_pci_fixup(1); /* phase 1 initialization (after PCI bus has been scanned) */
......@@ -146,14 +146,14 @@ void __init
pcibios_update_resource (struct pci_dev *dev, struct resource *root,
struct resource *res, int resource)
{
unsigned long where, size;
u32 reg;
unsigned long where, size;
u32 reg;
where = PCI_BASE_ADDRESS_0 + (resource * 4);
size = res->end - res->start;
pci_read_config_dword(dev, where, &reg);
reg = (reg & size) | (((u32)(res->start - root->start)) & ~size);
pci_write_config_dword(dev, where, reg);
where = PCI_BASE_ADDRESS_0 + (resource * 4);
size = res->end - res->start;
pci_read_config_dword(dev, where, &reg);
reg = (reg & size) | (((u32)(res->start - root->start)) & ~size);
pci_write_config_dword(dev, where, reg);
/* ??? FIXME -- record old value for shutdown. */
}
......@@ -190,7 +190,7 @@ pcibios_align_resource (void *data, struct resource *res, unsigned long size)
/*
* PCI BIOS setup, always defaults to SAL interface
*/
char * __init
char * __init
pcibios_setup (char *str)
{
return NULL;
......
......@@ -32,6 +32,7 @@
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h> /* for ia64_get_itc() */
......@@ -467,7 +468,7 @@ pfm_smpl_buffer_alloc(pfm_context_t *ctx, unsigned long which_pmds, unsigned lon
if (size > current->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN;
/* find some free area in address space */
addr = get_unmapped_area(NULL, 0, size, 0, 0);
addr = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE);
if (!addr) goto no_addr;
DBprintk((" entries=%ld aligned size=%ld, unmapped @0x%lx\n", entries, size, addr));
......@@ -573,12 +574,8 @@ pfx_is_sane(pfreq_context_t *pfx)
/* cannot send to process 1, 0 means do not notify */
if (pfx->notify_pid < 0 || pfx->notify_pid == 1) return 0;
/* asked for sampling, but nothing to record ! */
if (pfx->smpl_entries > 0 && pfm_smpl_entry_size(&pfx->smpl_regs, 1) == 0) return 0;
/* probably more to add here */
return 1;
}
......@@ -786,26 +783,22 @@ pfm_read_pmds(struct task_struct *ta, perfmon_req_t *req, int count)
/* XXX: ctx locking may be required here */
for (i = 0; i < count; i++, req++) {
int k;
if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
if (!PMD_IS_IMPL(tmp.pfr_reg.reg_num)) return -EINVAL;
k = tmp.pfr_reg.reg_num - PMU_FIRST_COUNTER;
if (PMD_IS_COUNTER(tmp.pfr_reg.reg_num)) {
if (ta == current){
val = ia64_get_pmd(tmp.pfr_reg.reg_num);
} else {
val = th->pmd[k];
val = th->pmd[tmp.pfr_reg.reg_num];
}
val &= pmu_conf.perf_ovfl_val;
/*
* lower part of .val may not be zero, so we must be an addition because of
* residual count (see update_counters).
*/
val += ctx->ctx_pmds[k].val;
val += ctx->ctx_pmds[tmp.pfr_reg.reg_num - PMU_FIRST_COUNTER].val;
} else {
/* for now */
if (ta != current) return -EINVAL;
......@@ -1646,7 +1639,7 @@ perfmon_init (void)
pmu_conf.pfm_is_disabled = 1;
printk("perfmon: version %s\n", PFM_VERSION);
printk("perfmon: version %s (sampling format v%d)\n", PFM_VERSION, PFM_SMPL_HDR_VERSION);
printk("perfmon: Interrupt vectored to %u\n", IA64_PERFMON_VECTOR);
if ((status=ia64_pal_perf_mon_info(pmu_conf.impl_regs, &pm_info)) != 0) {
......@@ -1658,11 +1651,8 @@ perfmon_init (void)
pmu_conf.num_pmds = find_num_pm_regs(pmu_conf.impl_regs);
pmu_conf.num_pmcs = find_num_pm_regs(&pmu_conf.impl_regs[4]);
printk("perfmon: Counters are %d bits\n", pm_info.pal_perf_mon_info_s.width);
printk("perfmon: Maximum counter value 0x%lx\n", pmu_conf.perf_ovfl_val);
printk("perfmon: %ld PMC/PMD pairs\n", pmu_conf.max_counters);
printk("perfmon: %ld PMCs, %ld PMDs\n", pmu_conf.num_pmcs, pmu_conf.num_pmds);
printk("perfmon: Sampling format v%d\n", PFM_SMPL_HDR_VERSION);
printk("perfmon: %d bits counters (max value 0x%lx)\n", pm_info.pal_perf_mon_info_s.width, pmu_conf.perf_ovfl_val);
printk("perfmon: %ld PMC/PMD pairs, %ld PMCs, %ld PMDs\n", pmu_conf.max_counters, pmu_conf.num_pmcs, pmu_conf.num_pmds);
/* sanity check */
if (pmu_conf.num_pmds >= IA64_NUM_PMD_REGS || pmu_conf.num_pmcs >= IA64_NUM_PMC_REGS) {
......
......@@ -143,7 +143,7 @@ ia64_save_extra (struct task_struct *task)
pfm_save_regs(task);
#endif
if (IS_IA32_PROCESS(ia64_task_regs(task)))
ia32_save_state(&task->thread);
ia32_save_state(task);
}
void
......@@ -156,7 +156,7 @@ ia64_load_extra (struct task_struct *task)
pfm_load_regs(task);
#endif
if (IS_IA32_PROCESS(ia64_task_regs(task)))
ia32_load_state(&task->thread);
ia32_load_state(task);
}
/*
......@@ -282,10 +282,11 @@ copy_thread (int nr, unsigned long clone_flags,
* state from the current task to the new task
*/
if (IS_IA32_PROCESS(ia64_task_regs(current)))
ia32_save_state(&p->thread);
ia32_save_state(p);
#endif
#ifdef CONFIG_PERFMON
if (current->thread.pfm_context)
p->thread.pfm_pend_notify = 0;
if (p->thread.pfm_context)
retval = pfm_inherit(p);
#endif
return retval;
......@@ -294,11 +295,10 @@ copy_thread (int nr, unsigned long clone_flags,
void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
unsigned long ar_bsp, addr, mask, sp, nat_bits = 0, ip, ar_rnat;
unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
elf_greg_t *dst = arg;
struct pt_regs *pt;
char nat;
long val;
int i;
memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */
......@@ -309,17 +309,13 @@ do_copy_regs (struct unw_frame_info *info, void *arg)
unw_get_sp(info, &sp);
pt = (struct pt_regs *) (sp + 16);
ar_bsp = ia64_get_user_bsp(current, pt);
urbs_end = ia64_get_user_rbs_end(current, pt, &cfm);
/*
* Write portion of RSE backing store living on the kernel stack to the VM of the
* process.
*/
for (addr = pt->ar_bspstore; addr < ar_bsp; addr += 8)
if (ia64_peek(current, ar_bsp, addr, &val) == 0)
access_process_vm(current, addr, &val, sizeof(val), 1);
if (ia64_sync_user_rbs(current, info->sw, pt->ar_bspstore, urbs_end) < 0)
return;
ia64_peek(current, ar_bsp, (long) ia64_rse_rnat_addr((long *) addr - 1), &ar_rnat);
ia64_peek(current, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
&ar_rnat);
/*
* coredump format:
......@@ -347,7 +343,7 @@ do_copy_regs (struct unw_frame_info *info, void *arg)
unw_get_rp(info, &ip);
dst[42] = ip + ia64_psr(pt)->ri;
dst[43] = pt->cr_ifs & 0x3fffffffff;
dst[43] = cfm;
dst[44] = pt->cr_ipsr & IA64_PSR_UM;
unw_get_ar(info, UNW_AR_RSC, &dst[45]);
......@@ -355,7 +351,7 @@ do_copy_regs (struct unw_frame_info *info, void *arg)
* For bsp and bspstore, unw_get_ar() would return the kernel
* addresses, but we need the user-level addresses instead:
*/
dst[46] = ar_bsp;
dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */
dst[47] = pt->ar_bspstore;
dst[48] = ar_rnat;
unw_get_ar(info, UNW_AR_CCV, &dst[49]);
......@@ -528,13 +524,11 @@ machine_restart (char *restart_cmd)
void
machine_halt (void)
{
printk("machine_halt: need PAL or ACPI version here!!\n");
machine_restart(0);
}
void
machine_power_off (void)
{
printk("machine_power_off: unimplemented (need ACPI version here)\n");
machine_halt ();
if (pm_power_off)
pm_power_off();
}
This diff is collapsed.
This diff is collapsed.
struct sigframe {
/*
* Place signal handler args where user-level unwinder can find them easily.
* DO NOT MOVE THESE. They are part of the IA-64 Linux ABI and there is
* user-level code that depends on their presence!
*/
unsigned long arg0; /* signum */
unsigned long arg1; /* siginfo pointer */
unsigned long arg2; /* sigcontext pointer */
unsigned long rbs_base; /* base of new register backing store (or NULL) */
void *handler; /* pointer to the plabel of the signal handler */
struct siginfo info;
struct sigcontext sc;
};
......@@ -25,6 +25,8 @@
#include <asm/rse.h>
#include <asm/sigcontext.h>
#include "sigframe.h"
#define DEBUG_SIG 0
#define STACK_ALIGN 16 /* minimal alignment for stack pointer */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
......@@ -43,11 +45,6 @@ struct sigscratch {
struct pt_regs pt;
};
struct sigframe {
struct siginfo info;
struct sigcontext sc;
};
extern long ia64_do_signal (sigset_t *, struct sigscratch *, long); /* forward decl */
long
......@@ -380,7 +377,13 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
err = copy_siginfo_to_user(&frame->info, info);
err = __put_user(sig, &frame->arg0);
err |= __put_user(&frame->info, &frame->arg1);
err |= __put_user(&frame->sc, &frame->arg2);
err |= __put_user(new_rbs, &frame->rbs_base);
err |= __put_user(ka->sa.sa_handler, &frame->handler);
err |= copy_siginfo_to_user(&frame->info, info);
err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
......@@ -390,19 +393,16 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
if (err)
goto give_sigsegv;
scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */
scr->pt.r2 = sig; /* signal number */
scr->pt.r3 = (unsigned long) ka->sa.sa_handler; /* addr. of handler's proc desc */
scr->pt.r15 = new_rbs;
scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */
scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
scr->pt.cr_iip = tramp_addr;
ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */
/*
* Note: this affects only the NaT bits of the scratch regs
* (the ones saved in pt_regs), which is exactly what we want.
* Note: this affects only the NaT bits of the scratch regs (the ones saved in
* pt_regs), which is exactly what we want.
*/
scr->scratch_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */
scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%lx\n",
......
This diff is collapsed.
This diff is collapsed.
......@@ -22,16 +22,18 @@
#define COLOR_ALIGN(addr) (((addr) + SHMLBA - 1) & ~(SHMLBA - 1))
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct * vmm;
long map_shared = (flags & MAP_SHARED);
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
if (!addr)
addr = TASK_UNMAPPED_BASE;
if (flags & MAP_SHARED)
if (map_shared)
addr = COLOR_ALIGN(addr);
else
addr = PAGE_ALIGN(addr);
......@@ -45,7 +47,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
if (!vmm || addr + len <= vmm->vm_start)
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
if (map_shared)
addr = COLOR_ALIGN(addr);
}
}
......@@ -176,11 +178,22 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
unsigned long roff;
struct file *file = 0;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
return -EBADF;
if (!file->f_op || !file->f_op->mmap)
return -ENODEV;
}
/*
* A zero mmap always succeeds in Linux, independent of
* whether or not the remaining arguments are valid.
* A zero mmap always succeeds in Linux, independent of whether or not the
* remaining arguments are valid.
*/
if (PAGE_ALIGN(len) == 0)
len = PAGE_ALIGN(len);
if (len == 0)
return addr;
/* don't permit mappings into unmapped space or the virtual page table of a region: */
......@@ -192,13 +205,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
if (rgn_index(addr) != rgn_index(addr + len))
return -EINVAL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
return -EBADF;
}
down_write(&current->mm->mmap_sem);
addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&current->mm->mmap_sem);
......@@ -247,13 +253,6 @@ sys_vm86 (long arg0, long arg1, long arg2, long arg3)
return -ENOSYS;
}
asmlinkage long
sys_modify_ldt (long arg0, long arg1, long arg2, long arg3)
{
printk(KERN_ERR "sys_modify_ldt(%lx, %lx, %lx, %lx)!\n", arg0, arg1, arg2, arg3);
return -ENOSYS;
}
asmlinkage unsigned long
ia64_create_module (const char *name_user, size_t size, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7, long stack)
......
......@@ -25,6 +25,7 @@
extern rwlock_t xtime_lock;
extern unsigned long wall_jiffies;
extern unsigned long last_time_offset;
#ifdef CONFIG_IA64_DEBUG_IRQ
......@@ -45,9 +46,8 @@ do_profile (unsigned long ip)
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
* Don't ignore out-of-bounds IP values silently, put them into the last
* histogram slot, so if present, they will show up as a sharp peak.
*/
if (ip > prof_len - 1)
ip = prof_len - 1;
......@@ -57,34 +57,29 @@ do_profile (unsigned long ip)
}
/*
* Return the number of micro-seconds that elapsed since the last
* update to jiffy. The xtime_lock must be at least read-locked when
* calling this routine.
* Return the number of micro-seconds that elapsed since the last update to jiffy. The
* xtime_lock must be at least read-locked when calling this routine.
*/
static inline unsigned long
gettimeoffset (void)
{
#ifdef CONFIG_SMP
/*
* The code below doesn't work for SMP because only CPU 0
* keeps track of the time.
*/
return 0;
#else
unsigned long now = ia64_get_itc(), last_tick;
unsigned long elapsed_cycles, lost = jiffies - wall_jiffies;
unsigned long now, last_tick;
# define time_keeper_id 0 /* smp_processor_id() of time-keeper */
last_tick = (local_cpu_data->itm_next - (lost+1)*local_cpu_data->itm_delta);
# if 1
last_tick = (cpu_data(time_keeper_id)->itm_next
- (lost + 1)*cpu_data(time_keeper_id)->itm_delta);
now = ia64_get_itc();
if ((long) (now - last_tick) < 0) {
printk("Yikes: now < last_tick (now=0x%lx,last_tick=%lx)! No can do.\n",
now, last_tick);
return 0;
}
# if 1
printk("CPU %d: now < last_tick (now=0x%lx,last_tick=0x%lx)!\n",
smp_processor_id(), now, last_tick);
# endif
return last_time_offset;
}
elapsed_cycles = now - last_tick;
return (elapsed_cycles*local_cpu_data->usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT;
#endif
}
void
......@@ -93,11 +88,10 @@ do_settimeofday (struct timeval *tv)
write_lock_irq(&xtime_lock);
{
/*
* This is revolting. We need to set "xtime"
* correctly. However, the value in this location is
* the value at the most recent update of wall time.
* Discover what correction gettimeofday would have
* done, and then undo it!
* This is revolting. We need to set "xtime" correctly. However, the value
* in this location is the value at the most recent update of wall time.
* Discover what correction gettimeofday would have done, and then undo
* it!
*/
tv->tv_usec -= gettimeoffset();
tv->tv_usec -= (jiffies - wall_jiffies) * (1000000 / HZ);
......@@ -119,12 +113,24 @@ do_settimeofday (struct timeval *tv)
void
do_gettimeofday (struct timeval *tv)
{
unsigned long flags, usec, sec;
unsigned long flags, usec, sec, old;
read_lock_irqsave(&xtime_lock, flags);
{
usec = gettimeoffset();
/*
* Ensure time never goes backwards, even when ITC on different CPUs are
* not perfectly synchronized.
*/
do {
old = last_time_offset;
if (usec <= old) {
usec = old;
break;
}
} while (cmpxchg(&last_time_offset, old, usec) != old);
sec = xtime.tv_sec;
usec += xtime.tv_usec;
}
......@@ -162,6 +168,8 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#ifdef CONFIG_SMP
smp_do_timer(regs);
#endif
new_itm += local_cpu_data->itm_delta;
if (smp_processor_id() == 0) {
/*
* Here we are in the timer irq handler. We have irqs locally
......@@ -171,11 +179,11 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
write_lock(&xtime_lock);
do_timer(regs);
local_cpu_data->itm_next = new_itm;
write_unlock(&xtime_lock);
}
} else
local_cpu_data->itm_next = new_itm;
new_itm += local_cpu_data->itm_delta;
local_cpu_data->itm_next = new_itm;
if (time_after(new_itm, ia64_get_itc()))
break;
}
......@@ -228,9 +236,9 @@ ia64_init_itm (void)
long status;
/*
* According to SAL v2.6, we need to use a SAL call to determine the
* platform base frequency and then a PAL call to determine the
* frequency ratio between the ITC and the base frequency.
* According to SAL v2.6, we need to use a SAL call to determine the platform base
* frequency and then a PAL call to determine the frequency ratio between the ITC
* and the base frequency.
*/
status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &platform_base_freq, &drift);
if (status != 0) {
......@@ -284,6 +292,6 @@ void __init
time_init (void)
{
register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
efi_gettimeofday(&xtime);
efi_gettimeofday((struct timeval *) &xtime);
ia64_init_itm();
}
......@@ -215,12 +215,9 @@ static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
struct pt_regs *regs)
{
struct ia64_fpreg f6_11[6];
fp_state_t fp_state;
fpswa_ret_t ret;
#define FPSWA_BUG
#ifdef FPSWA_BUG
struct ia64_fpreg f6_15[10];
#endif
if (!fpswa_interface)
return -1;
......@@ -232,23 +229,12 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
* kernel, so set those bits in the mask and set the low volatile
* pointer to point to these registers.
*/
#ifndef FPSWA_BUG
fp_state.bitmask_low64 = 0x3c0; /* bit 6..9 */
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
#else
fp_state.bitmask_low64 = 0xffc0; /* bit6..bit15 */
f6_15[0] = regs->f6;
f6_15[1] = regs->f7;
f6_15[2] = regs->f8;
f6_15[3] = regs->f9;
__asm__ ("stf.spill %0=f10%P0" : "=m"(f6_15[4]));
__asm__ ("stf.spill %0=f11%P0" : "=m"(f6_15[5]));
__asm__ ("stf.spill %0=f12%P0" : "=m"(f6_15[6]));
__asm__ ("stf.spill %0=f13%P0" : "=m"(f6_15[7]));
__asm__ ("stf.spill %0=f14%P0" : "=m"(f6_15[8]));
__asm__ ("stf.spill %0=f15%P0" : "=m"(f6_15[9]));
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) f6_15;
#endif
fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
f6_11[0] = regs->f6; f6_11[1] = regs->f7;
f6_11[2] = regs->f8; f6_11[3] = regs->f9;
__asm__ ("stf.spill %0=f10%P0" : "=m"(f6_11[4]));
__asm__ ("stf.spill %0=f11%P0" : "=m"(f6_11[5]));
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) f6_11;
/*
* unsigned long (*EFI_FPSWA) (
* unsigned long trap_type,
......@@ -264,18 +250,10 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
(unsigned long *) ipsr, (unsigned long *) fpsr,
(unsigned long *) isr, (unsigned long *) pr,
(unsigned long *) ifs, &fp_state);
#ifdef FPSWA_BUG
__asm__ ("ldf.fill f10=%0%P0" :: "m"(f6_15[4]));
__asm__ ("ldf.fill f11=%0%P0" :: "m"(f6_15[5]));
__asm__ ("ldf.fill f12=%0%P0" :: "m"(f6_15[6]));
__asm__ ("ldf.fill f13=%0%P0" :: "m"(f6_15[7]));
__asm__ ("ldf.fill f14=%0%P0" :: "m"(f6_15[8]));
__asm__ ("ldf.fill f15=%0%P0" :: "m"(f6_15[9]));
regs->f6 = f6_15[0];
regs->f7 = f6_15[1];
regs->f8 = f6_15[2];
regs->f9 = f6_15[3];
#endif
regs->f6 = f6_11[0]; regs->f7 = f6_11[1];
regs->f8 = f6_11[2]; regs->f9 = f6_11[3];
__asm__ ("ldf.fill f10=%0%P0" :: "m"(f6_11[4]));
__asm__ ("ldf.fill f11=%0%P0" :: "m"(f6_11[5]));
return ret.status;
}
......@@ -321,7 +299,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
}
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
siginfo.si_code = 0;
siginfo.si_code = __SI_FAULT; /* default code */
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x11) {
siginfo.si_code = FPE_FLTINV;
......@@ -339,7 +317,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
/* raise exception */
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
siginfo.si_code = 0;
siginfo.si_code = __SI_FAULT; /* default code */
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x880) {
siginfo.si_code = FPE_FLTOVF;
......@@ -443,14 +421,12 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
sprintf(buf, "General Exception: %s%s", reason[code],
(code == 3) ? ((isr & (1UL << 37))
? " (RSE access)" : " (data access)") : "");
#ifndef CONFIG_ITANIUM_ASTEP_SPECIFIC
if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
printk("%016lx:possible hazard, pr = %016lx\n", regs->cr_iip, regs->pr);
# endif
return;
}
#endif
break;
case 25: /* Disabled FP-Register */
......
......@@ -325,11 +325,11 @@ set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
DPRINT("ubs_end=%p bsp=%p addr=%px\n", (void *) ubs_end, (void *) bsp, (void *) addr);
ia64_poke(current, (unsigned long) ubs_end, (unsigned long) addr, val);
ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
rnat_addr = ia64_rse_rnat_addr(addr);
ia64_peek(current, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
(void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
......@@ -338,7 +338,7 @@ set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
rnats |= nat_mask;
else
rnats &= ~nat_mask;
ia64_poke(current, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
}
......@@ -394,7 +394,7 @@ get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *na
DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
ia64_peek(current, (unsigned long) ubs_end, (unsigned long) addr, val);
ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
if (nat) {
rnat_addr = ia64_rse_rnat_addr(addr);
......@@ -402,7 +402,7 @@ get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *na
DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
ia64_peek(current, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
*nat = (rnats & nat_mask) != 0;
}
}
......@@ -1299,7 +1299,12 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
"ip=0x%016lx\n\r", current->comm, current->pid,
ifa, regs->cr_iip + ipsr->ri);
tty_write_message(current->tty, buf);
/*
* Don't call tty_write_message() if we're in the kernel; we might
* be holding locks...
*/
if (user_mode(regs))
tty_write_message(current->tty, buf);
buf[len-1] = '\0'; /* drop '\r' */
printk(KERN_WARNING "%s", buf); /* watch for command names containing %s */
}
......
This diff is collapsed.
......@@ -58,7 +58,7 @@ struct unw_table {
unsigned long segment_base; /* base for offsets in the unwind table entries */
unsigned long start;
unsigned long end;
struct unw_table_entry *array;
const struct unw_table_entry *array;
unsigned long length;
};
......
......@@ -14,11 +14,7 @@ obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
checksum.o clear_page.o csum_partial_copy.o copy_page.o \
copy_user.o clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o io.o do_csum.o \
swiotlb.o
ifneq ($(CONFIG_ITANIUM_ASTEP_SPECIFIC),y)
obj-y += memcpy.o memset.o strlen.o
endif
memcpy.o memset.o strlen.o swiotlb.o
IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
......
......@@ -9,7 +9,7 @@
* This file contains network checksum routines that are better done
* in an architecture-specific manner due to speed..
*/
#include <linux/string.h>
#include <asm/byteorder.h>
......@@ -55,8 +55,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr,
((unsigned long) ntohs(len) << 16) +
((unsigned long) proto << 8));
/* Fold down to 32-bits so we don't loose in the typedef-less
network stack. */
/* Fold down to 32-bits so we don't loose in the typedef-less network stack. */
/* 64 to 33 */
result = (result & 0xffffffff) + (result >> 32);
/* 33 to 32 */
......@@ -64,8 +63,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr,
return result;
}
extern unsigned long do_csum(const unsigned char *, unsigned int, unsigned int);
extern unsigned long do_csum_c(const unsigned char *, unsigned int, unsigned int);
extern unsigned long do_csum (const unsigned char *, long);
/*
* This is a version of ip_compute_csum() optimized for IP headers,
......@@ -73,7 +71,7 @@ extern unsigned long do_csum_c(const unsigned char *, unsigned int, unsigned int
*/
unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
return ~do_csum(iph,ihl*4,0);
return ~do_csum(iph, ihl*4);
}
/*
......@@ -90,7 +88,7 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
*/
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
{
unsigned long result = do_csum(buff, len, 0);
unsigned long result = do_csum(buff, len);
/* add in old sum, and carry.. */
result += sum;
......@@ -106,5 +104,5 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
*/
unsigned short ip_compute_csum(unsigned char * buff, int len)
{
return ~do_csum(buff,len, 0);
return ~do_csum(buff,len);
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment