Commit 6eba10be authored by David Mosberger

ia64: Manual merge with 2.5.71.

parents 40d45b93 17e74013
......@@ -26,6 +26,10 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
config TIME_INTERPOLATION
bool
default y
choice
prompt "IA-64 processor type"
default ITANIUM
......@@ -63,7 +67,7 @@ config IA64_GENERIC
HP-simulator For the HP simulator
(<http://software.hp.com/ia64linux/>).
HP-zx1 For HP zx1-based systems.
SN1-simulator For the SGI SN1 simulator.
SGI-SN2 For SGI Altix systems
DIG-compliant For DIG ("Developer's Interface Guide") compliant
systems.
......@@ -82,9 +86,6 @@ config IA64_HP_ZX1
for the zx1 I/O MMU and makes root bus bridges appear in PCI config
space (required for zx1 agpgart support).
config IA64_SGI_SN1
bool "SGI-SN1"
config IA64_SGI_SN2
bool "SGI-SN2"
......@@ -190,8 +191,8 @@ config ITANIUM_BSTEP_SPECIFIC
# align cache-sensitive data to 128 bytes
config IA64_L1_CACHE_SHIFT
int
default "7" if MCKINLEY || ITANIUM && IA64_SGI_SN1
default "6" if ITANIUM && !IA64_SGI_SN1
default "7" if MCKINLEY
default "6" if ITANIUM
# align cache-sensitive data to 64 bytes
config MCKINLEY_ASTEP_SPECIFIC
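
For context, a hedged illustration of how the IA64_L1_CACHE_SHIFT value above is typically consumed (modeled on include/asm-ia64/cache.h; not part of this diff):

    #define L1_CACHE_SHIFT  CONFIG_IA64_L1_CACHE_SHIFT   /* 7 on McKinley, 6 on Itanium */
    #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)        /* 128 or 64 bytes */

    /* Example: keep a hot counter on its own cache line. */
    struct hot_counter {
            unsigned long value;
    } __attribute__ ((aligned (L1_CACHE_BYTES)));
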
......@@ -210,7 +211,7 @@ config MCKINLEY_A0_SPECIFIC
config NUMA
bool "Enable NUMA support" if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
default y if IA64_SGI_SN1 || IA64_SGI_SN2
default y if IA64_SGI_SN2
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
......@@ -234,7 +235,7 @@ endchoice
config DISCONTIGMEM
bool
depends on IA64_SGI_SN1 || IA64_SGI_SN2 || (IA64_GENERIC || IA64_DIG || IA64_HP_ZX1) && NUMA
depends on IA64_SGI_SN2 || (IA64_GENERIC || IA64_DIG || IA64_HP_ZX1) && NUMA
default y
help
Say Y to support efficient handling of discontiguous physical memory,
......@@ -259,7 +260,7 @@ config VIRTUAL_MEM_MAP
config IA64_MCA
bool "Enable IA-64 Machine Check Abort" if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
default y if IA64_SGI_SN1 || IA64_SGI_SN2
default y if IA64_SGI_SN2
help
Say Y here to enable machine check support for IA-64. If you're
unsure, answer Y.
......@@ -288,17 +289,12 @@ config PM
config IOSAPIC
bool
depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
default y
config IA64_SGI_SN
bool
depends on IA64_SGI_SN1 || IA64_SGI_SN2
depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_SGI_SN2
default y
config IA64_SGI_SN_DEBUG
bool "Enable extra debugging code"
depends on IA64_SGI_SN1 || IA64_SGI_SN2
depends on IA64_SGI_SN2
help
Turns on extra debugging code in the SGI SN (Scalable NUMA) platform
for IA-64. Unless you are debugging problems on an SGI SN IA-64 box,
......@@ -306,14 +302,14 @@ config IA64_SGI_SN_DEBUG
config IA64_SGI_SN_SIM
bool "Enable SGI Medusa Simulator Support"
depends on IA64_SGI_SN1 || IA64_SGI_SN2
depends on IA64_SGI_SN2
help
If you are compiling a kernel that will run under SGI's IA-64
simulator (Medusa) then say Y, otherwise say N.
config IA64_SGI_AUTOTEST
bool "Enable autotest (llsc). Option to run cache test instead of booting"
depends on IA64_SGI_SN1 || IA64_SGI_SN2
depends on IA64_SGI_SN2
help
Build a kernel used for hardware validation. If you include the
keyword "autotest" on the boot command line, the kernel does NOT boot.
......@@ -323,7 +319,7 @@ config IA64_SGI_AUTOTEST
config SERIAL_SGI_L1_PROTOCOL
bool "Enable protocol mode for the L1 console"
depends on IA64_SGI_SN1 || IA64_SGI_SN2
depends on IA64_SGI_SN2
help
Uses protocol mode instead of raw mode for the level 1 console on the
SGI SN (Scalable NUMA) platform for IA-64. If you are compiling for
......@@ -331,17 +327,9 @@ config SERIAL_SGI_L1_PROTOCOL
config PERCPU_IRQ
bool
depends on IA64_SGI_SN1 || IA64_SGI_SN2
depends on IA64_SGI_SN2
default y
config PCIBA
tristate "PCIBA support"
depends on IA64_SGI_SN1 || IA64_SGI_SN2
help
IRIX PCIBA-inspired user mode PCI interface for the SGI SN (Scalable
NUMA) platform for IA-64. Unless you are compiling a kernel for an
SGI SN IA-64 box, say N.
# On IA-64, we always want an ELF /proc/kcore.
config KCORE_ELF
bool
......@@ -782,20 +770,18 @@ endmenu
source "drivers/usb/Kconfig"
source "lib/Kconfig"
source "net/bluetooth/Kconfig"
endif
source "lib/Kconfig"
source "arch/ia64/hp/sim/Kconfig"
menu "Kernel hacking"
config FSYS
bool "Light-weight system-call support (via epc)"
choice
prompt "Physical memory granularity"
default IA64_GRANULE_64MB
......@@ -862,7 +848,7 @@ config MAGIC_SYSRQ
config IA64_EARLY_PRINTK
bool "Early printk support"
depends on DEBUG_KERNEL
depends on DEBUG_KERNEL && !IA64_GENERIC
help
Selecting this option uses the VGA screen or serial console for
printk() output before the consoles are initialised. It is useful
......
......@@ -18,8 +18,8 @@ LDFLAGS_MODULE += -T arch/ia64/module.lds
AFLAGS_KERNEL := -mconstant-gp
EXTRA :=
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
-falign-functions=32
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
-falign-functions=32 -frename-registers
CFLAGS_KERNEL := -mconstant-gp
GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
......@@ -27,6 +27,10 @@ GCC_MINOR_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' |
GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))
arch-cppflags := $(shell arch/ia64/scripts/toolchain-flags $(CC) $(LD) $(OBJDUMP))
cflags-y += $(arch-cppflags)
AFLAGS += $(arch-cppflags)
ifeq ($(GAS_STATUS),buggy)
$(error Sorry, you need a newer version of the assember, one that is built from \
a source-tree that post-dates 18-Dec-2002. You can find a pre-compiled \
......@@ -35,19 +39,18 @@ $(error Sorry, you need a newer version of the assember, one that is built from
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
ifneq ($(GCC_VERSION),2)
cflags-$(CONFIG_ITANIUM) += -frename-registers
ifeq ($(GCC_VERSION),2)
$(error Sorry, your compiler is too old. GCC v2.96 is known to generate bad code.)
endif
ifeq ($(GCC_VERSION),3)
ifeq ($(GCC_MINOR_VERSION),4)
cflags-$(CONFIG_ITANIUM) += -mtune=merced
cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
cflags-$(CONFIG_ITANIUM) += -mtune=merced
cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
endif
endif
cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC) += -mb-step
cflags-$(CONFIG_IA64_SGI_SN) += -DBRINGUP
CFLAGS += $(cflags-y)
head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
......@@ -58,7 +61,7 @@ core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
......@@ -66,33 +69,37 @@ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
boot := arch/ia64/boot
tools := arch/ia64/tools
.PHONY: boot compressed include/asm-ia64/offsets.h
all: prepare vmlinux
.PHONY: boot compressed check
compressed: vmlinux.gz
vmlinux.gz: vmlinux
$(Q)$(MAKE) $(build)=$(boot) vmlinux.gz
$(Q)$(MAKE) $(build)=$(boot) $@
check: vmlinux
arch/ia64/scripts/unwcheck.sh vmlinux
arch/ia64/scripts/unwcheck.sh $<
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=$(tools)
CLEAN_FILES += include/asm-ia64/offsets.h vmlinux.gz bootloader
CLEAN_FILES += include/asm-ia64/.offsets.h.stamp include/asm-ia64/offsets.h vmlinux.gz bootloader
prepare: include/asm-ia64/offsets.h
include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
$(call filechk,gen-asm-offsets)
arch/ia64/kernel/asm-offsets.s: include/asm-ia64/.offsets.h.stamp
include/asm-ia64/.offsets.h.stamp:
[ -s include/asm-ia64/offsets.h ] \
|| echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/offsets.h
touch $@
boot: lib/lib.a vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
include/asm-ia64/offsets.h: include/asm include/linux/version.h include/config/MARKER
$(Q)$(MAKE) $(build)=$(tools) $@
define archhelp
echo ' compressed - Build compressed kernel image'
......
......@@ -181,10 +181,10 @@ _start (void)
continue;
req.len = elf_phdr->p_filesz;
req.addr = __pa(elf_phdr->p_vaddr);
req.addr = __pa(elf_phdr->p_paddr);
ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ);
ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);
memset((char *)__pa(elf_phdr->p_vaddr) + elf_phdr->p_filesz, 0,
memset((char *)__pa(elf_phdr->p_paddr) + elf_phdr->p_filesz, 0,
elf_phdr->p_memsz - elf_phdr->p_filesz);
}
ssc(fd, 0, 0, 0, SSC_CLOSE);
......
......@@ -1682,6 +1682,10 @@ ioc_init(u64 hpa, void *handle)
ioc_resource_init(ioc);
ioc_sac_init(ioc);
if ((long) ~IOVP_MASK > (long) ia64_max_iommu_merge_mask)
ia64_max_iommu_merge_mask = ~IOVP_MASK;
MAX_DMA_ADDRESS = ~0UL;
printk(KERN_INFO PFX
"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
......@@ -1933,8 +1937,6 @@ static struct acpi_driver acpi_sba_ioc_driver = {
static int __init
sba_init(void)
{
MAX_DMA_ADDRESS = ~0UL;
acpi_bus_register_driver(&acpi_sba_ioc_driver);
#ifdef CONFIG_PCI
......
......@@ -8,6 +8,10 @@ config HP_SIMETH
config HP_SIMSERIAL
bool "Simulated serial driver support"
config HP_SIMSERIAL_CONSOLE
bool "Console for HP simulator"
depends on HP_SIMSERIAL
config HP_SIMSCSI
bool "Simulated SCSI disk"
depends on SCSI
......
......@@ -7,9 +7,10 @@
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
obj-y := hpsim_console.o hpsim_irq.o hpsim_setup.o
obj-y := hpsim_irq.o hpsim_setup.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_HP_SIMETH) += simeth.o
obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
......@@ -5,6 +5,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
#include <linux/config.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
......@@ -24,8 +25,6 @@
#include "hpsim_ssc.h"
extern struct console hpsim_cons;
/*
* Simulator system call.
*/
......@@ -56,5 +55,11 @@ hpsim_setup (char **cmdline_p)
{
ROOT_DEV = Root_SDA1; /* default to first SCSI drive */
register_console(&hpsim_cons);
#ifdef CONFIG_HP_SIMSERIAL_CONSOLE
{
extern struct console hpsim_cons;
if (ia64_platform_is("hpsim"))
register_console(&hpsim_cons);
}
#endif
}
......@@ -1031,6 +1031,9 @@ simrs_init (void)
int i;
struct serial_state *state;
if (!ia64_platform_is("hpsim"))
return -ENODEV;
hp_simserial_driver = alloc_tty_driver(1);
if (!hp_simserial_driver)
return -ENOMEM;
......
......@@ -16,7 +16,8 @@
#include <asm/param.h>
#include <asm/signal.h>
#include <asm/ia32.h>
#include "ia32priv.h"
#define CONFIG_BINFMT_ELF32
......
......@@ -439,8 +439,8 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 compat_sys_futex /* 240 */
data8 compat_sys_setaffinity
data8 compat_sys_getaffinity
data8 compat_sys_sched_setaffinity
data8 compat_sys_sched_getaffinity
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_ni_syscall /* 245 */
......
......@@ -11,36 +11,106 @@
#include <linux/dirent.h>
#include <linux/fs.h> /* argh, msdos_fs.h isn't self-contained... */
#include <linux/signal.h> /* argh, msdos_fs.h isn't self-contained... */
#include <asm/ia32.h>
#include <linux/msdos_fs.h>
#include <linux/mtio.h>
#include <linux/ncp_fs.h>
#include <linux/capi.h>
#include <linux/videodev.h>
#include <linux/synclink.h>
#include <linux/atmdev.h>
#include <linux/atm_eni.h>
#include <linux/atm_nicstar.h>
#include <linux/atm_zatm.h>
#include <linux/atm_idt77105.h>
#include <linux/compat.h>
#include "ia32priv.h"
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/ioctl.h>
#include <linux/if.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/raid/md.h>
#include <linux/kd.h>
#include <linux/route.h>
#include <linux/in6.h>
#include <linux/ipv6_route.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/vt.h>
#include <linux/file.h>
#include <linux/fd.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ixjuser.h>
#include <linux/i2o-dev.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/cdrom.h>
#include <linux/loop.h>
#include <linux/auto_fs.h>
#include <linux/auto_fs4.h>
#include <linux/devfs_fs.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/fb.h>
#include <linux/ext2_fs.h>
#include <linux/videodev.h>
#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/smb_fs.h>
#include <linux/blkpg.h>
#include <linux/blk.h>
#include <linux/elevator.h>
#include <linux/rtc.h>
#include <linux/pci.h>
#include <linux/rtc.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/reiserfs_fs.h>
#include <linux/if_tun.h>
#include <linux/dirent.h>
#include <linux/ctype.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/rfcomm.h>
#include <scsi/scsi.h>
/* Ugly hack. */
#undef __KERNEL__
#undef __KERNEL__
#include <scsi/scsi_ioctl.h>
#define __KERNEL__
#define __KERNEL__
#include <scsi/sg.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_bonding.h>
#include <linux/watchdog.h>
#include <asm/module.h>
#include <asm/ioctl32.h>
#include <linux/soundcard.h>
#include <linux/lp.h>
#include <linux/atm.h>
#include <linux/atmarp.h>
#include <linux/atmclip.h>
#include <linux/atmdev.h>
#include <linux/atmioc.h>
#include <linux/atmlec.h>
#include <linux/atmmpc.h>
#include <linux/atmsvc.h>
#include <linux/atm_tcp.h>
#include <linux/sonet.h>
#include <linux/atm_suni.h>
#include <linux/mtd/mtd.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/nbd.h>
#include <linux/random.h>
#include <linux/filter.h>
#include <../drivers/char/drm/drm.h>
#include <../drivers/char/drm/mga_drm.h>
#include <../drivers/char/drm/i810_drm.h>
#define IOCTL_NR(a) ((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
#define DO_IOCTL(fd, cmd, arg) ({ \
......@@ -57,6 +127,9 @@
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct linux32_dirent[2])
#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct linux32_dirent[2])
static long
put_dirent32 (struct dirent *d, struct linux32_dirent *d32)
{
......@@ -67,6 +140,23 @@ put_dirent32 (struct dirent *d, struct linux32_dirent *d32)
|| put_user(d->d_reclen, &d32->d_reclen)
|| copy_to_user(d32->d_name, d->d_name, namelen + 1));
}
static int vfat_ioctl32(unsigned fd, unsigned cmd, void *ptr)
{
int ret;
mm_segment_t oldfs = get_fs();
struct dirent d[2];
set_fs(KERNEL_DS);
ret = sys_ioctl(fd,cmd,(unsigned long)&d);
set_fs(oldfs);
if (!ret) {
ret |= put_dirent32(&d[0], (struct linux32_dirent *)ptr);
ret |= put_dirent32(&d[1], ((struct linux32_dirent *)ptr) + 1);
}
return ret;
}
/*
* The transform code for the SG_IO ioctl was brazenly lifted from
* the Sparc64 port in the file `arch/sparc64/kernel/ioctl32.c'.
......@@ -294,3 +384,83 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
}
return err;
}
static __inline__ void *alloc_user_space(long len)
{
struct pt_regs *regs = ((struct pt_regs *)((unsigned long) current +
IA64_STK_OFFSET)) - 1;
return (void *)regs->r12 - len;
}
struct ifmap32 {
u32 mem_start;
u32 mem_end;
unsigned short base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;
};
struct ifreq32 {
#define IFHWADDRLEN 6
#define IFNAMSIZ 16
union {
char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
} ifr_ifrn;
union {
struct sockaddr ifru_addr;
struct sockaddr ifru_dstaddr;
struct sockaddr ifru_broadaddr;
struct sockaddr ifru_netmask;
struct sockaddr ifru_hwaddr;
short ifru_flags;
int ifru_ivalue;
int ifru_mtu;
struct ifmap32 ifru_map;
char ifru_slave[IFNAMSIZ]; /* Just fits the size */
char ifru_newname[IFNAMSIZ];
compat_caddr_t ifru_data;
} ifr_ifru;
};
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct ifreq *u_ifreq64;
struct ifreq32 *u_ifreq32 = (struct ifreq32 *) arg;
char tmp_buf[IFNAMSIZ];
void *data64;
u32 data32;
if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
IFNAMSIZ))
return -EFAULT;
if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
return -EFAULT;
data64 = (void *) P(data32);
u_ifreq64 = alloc_user_space(sizeof(*u_ifreq64));
/* Don't check these user accesses, just let that get trapped
* in the ioctl handler instead.
*/
copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ);
__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data);
return sys_ioctl(fd, cmd, (unsigned long) u_ifreq64);
}
typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl32_handler_t)(handler), NULL },
#define IOCTL_TABLE_START \
struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END \
}; struct ioctl_trans ioctl_end[0];
IOCTL_TABLE_START
#include <linux/compat_ioctl.h>
HANDLE_IOCTL(VFAT_IOCTL_READDIR_BOTH32, vfat_ioctl32)
HANDLE_IOCTL(VFAT_IOCTL_READDIR_SHORT32, vfat_ioctl32)
HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
IOCTL_TABLE_END
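
For context, a hedged sketch of how the ioctl_start[]..ioctl_end[] table built by the macros above is consumed by the generic 32-bit ioctl translation code (field and function names here are assumptions, not part of this diff):

    extern struct ioctl_trans ioctl_start[], ioctl_end[];

    static ioctl32_handler_t
    lookup_ioctl32_handler (unsigned int cmd)
    {
            struct ioctl_trans *t;

            /* Linear scan of the table; unknown commands fall back to sys_ioctl(). */
            for (t = ioctl_start; t < ioctl_end; t++)
                    if (t->cmd == cmd)
                            return (ioctl32_handler_t) t->handler;
            return NULL;
    }
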
......@@ -14,7 +14,8 @@
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/ia32.h>
#include "ia32priv.h"
#define P(p) ((void *) (unsigned long) (p))
......
......@@ -28,7 +28,8 @@
#include <asm/rse.h>
#include <asm/sigcontext.h>
#include <asm/segment.h>
#include <asm/ia32.h>
#include "ia32priv.h"
#include "../kernel/sigframe.h"
......@@ -179,8 +180,10 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
* datasel ar.fdr(32:47)
*
* _st[(0+TOS)%8] f8
* _st[(1+TOS)%8] f9 (f8, f9 from ptregs)
* : : : (f10..f15 from live reg)
* _st[(1+TOS)%8] f9
* _st[(2+TOS)%8] f10
* _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
* : : : (f12..f15 from live reg)
* : : :
* _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
*
......@@ -262,8 +265,8 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
__put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000
/*
* save f8 and f9 from pt_regs
* save f10..f15 from live register set
* save f8..f11 from pt_regs
* save f12..f15 from live register set
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
......@@ -278,11 +281,11 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f9);
copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 10);
ia64f2ia32f(fpregp, &ptp->f10);
copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 11);
ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 13);
......@@ -394,8 +397,8 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
/*
* restore f8, f9 onto pt_regs
* restore f10..f15 onto live registers
* restore f8..f11 onto pt_regs
* restore f12..f15 onto live registers
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
......@@ -411,11 +414,11 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia32f2ia64f(&ptp->f8, fpregp);
copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f9, fpregp);
copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(10, fpregp);
ia32f2ia64f(&ptp->f10, fpregp);
copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(11, fpregp);
ia32f2ia64f(&ptp->f11, fpregp);
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
......@@ -738,11 +741,11 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) tmp << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) tmp << 32)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32)
#define copyseg_cs(tmp) (regs->r17 |= tmp)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) tmp << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) tmp << 16)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16)
#define copyseg_ds(tmp) (regs->r16 |= tmp)
#define COPY_SEG(seg) \
......
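
The parenthesization change in the copyseg_*() macros above is defensive macro hygiene; a small self-contained example of the hazard it guards against (illustrative names only, not part of this diff):

    #include <stdio.h>

    #define SHIFT_BAD(tmp)   ((unsigned long) tmp << 48)     /* old style */
    #define SHIFT_GOOD(tmp)  ((unsigned long) (tmp) << 48)   /* fixed style */

    int main (void)
    {
            unsigned long sel = 0x12345678UL;

            /* expands to ((unsigned long) sel & (0xffffUL << 48)) -- wrong grouping */
            printf("bad : %#lx\n", SHIFT_BAD(sel & 0xffffUL));
            /* expands to ((unsigned long) (sel & 0xffffUL) << 48) -- intended */
            printf("good: %#lx\n", SHIFT_GOOD(sel & 0xffffUL));
            return 0;
    }
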
......@@ -22,7 +22,8 @@
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/ia32.h>
#include "ia32priv.h"
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
......@@ -60,30 +61,26 @@ ia32_load_segment_descriptors (struct task_struct *task)
regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
task->thread.csd = load_desc(regs->r17 >> 0); /* CSD */
task->thread.ssd = load_desc(regs->r17 >> 16); /* SSD */
regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
void
ia32_save_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;
unsigned long eflag, fsr, fcr, fir, fdr;
asm ("mov %0=ar.eflag;"
"mov %1=ar.fsr;"
"mov %2=ar.fcr;"
"mov %3=ar.fir;"
"mov %4=ar.fdr;"
"mov %5=ar.csd;"
"mov %6=ar.ssd;"
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr), "=r"(csd), "=r"(ssd));
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
t->thread.eflag = eflag;
t->thread.fsr = fsr;
t->thread.fcr = fcr;
t->thread.fir = fir;
t->thread.fdr = fdr;
t->thread.csd = csd;
t->thread.ssd = ssd;
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
......@@ -91,7 +88,7 @@ ia32_save_state (struct task_struct *t)
void
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = ia64_task_regs(t);
int nr = get_cpu(); /* LDT and TSS depend on CPU number: */
......@@ -100,8 +97,6 @@ ia32_load_state (struct task_struct *t)
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
csd = t->thread.csd;
ssd = t->thread.ssd;
tssd = load_desc(_TSS(nr)); /* TSSD */
asm volatile ("mov ar.eflag=%0;"
......@@ -109,9 +104,7 @@ ia32_load_state (struct task_struct *t)
"mov ar.fcr=%2;"
"mov ar.fir=%3;"
"mov ar.fdr=%4;"
"mov ar.csd=%5;"
"mov ar.ssd=%6;"
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr), "r"(csd), "r"(ssd));
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
......@@ -181,6 +174,13 @@ ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
force_sig_info(SIGTRAP, &siginfo, current);
}
void
ia32_cpu_init (void)
{
/* initialize global ia32 state - CR0 and CR4 */
asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
}
static int __init
ia32_init (void)
{
......
......@@ -12,7 +12,8 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ia32.h>
#include "ia32priv.h"
#include <asm/ptrace.h>
int
......
......@@ -53,7 +53,8 @@
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/ia32.h>
#include "ia32priv.h"
#include <net/scm.h>
#include <net/sock.h>
......@@ -206,9 +207,8 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat *ubuf)
static int
get_page_prot (unsigned long addr)
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
struct vm_area_struct *vma = find_vma(current->mm, addr);
int prot = 0;
if (!vma || vma->vm_start > addr)
......@@ -231,14 +231,26 @@ static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
loff_t off)
{
void *page = (void *) get_zeroed_page(GFP_KERNEL);
void *page = NULL;
struct inode *inode;
unsigned long ret;
int old_prot = get_page_prot(start);
unsigned long ret = 0;
struct vm_area_struct *vma = find_vma(current->mm, start);
int old_prot = get_page_prot(vma, start);
DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
file, start, end, prot, flags, off);
/* Optimize the case where the old mmap and the new mmap are both anonymous */
if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
if (clear_user((void *) start, end - start)) {
ret = -EFAULT;
goto out;
}
goto skip_mmap;
}
page = (void *) get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
......@@ -263,6 +275,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
copy_to_user((void *) end, page + PAGE_OFF(end),
PAGE_SIZE - PAGE_OFF(end));
}
if (!(flags & MAP_ANONYMOUS)) {
/* read the file contents */
inode = file->f_dentry->d_inode;
......@@ -273,10 +286,13 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
goto out;
}
}
skip_mmap:
if (!(prot & PROT_WRITE))
ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
out:
free_page((unsigned long) page);
if (page)
free_page((unsigned long) page);
return ret;
}
......@@ -532,11 +548,12 @@ static long
mprotect_subpage (unsigned long address, int new_prot)
{
int old_prot;
struct vm_area_struct *vma;
if (new_prot == PROT_NONE)
return 0; /* optimize case where nothing changes... */
old_prot = get_page_prot(address);
vma = find_vma(current->mm, address);
old_prot = get_page_prot(vma, address);
return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}
......@@ -836,9 +853,8 @@ sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timev
}
}
size = FDS_BYTES(n);
ret = -EINVAL;
if (n < 0 || size < n)
if (n < 0)
goto out_nofds;
if (n > current->files->max_fdset)
......@@ -850,6 +866,7 @@ sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timev
* long-words.
*/
ret = -ENOMEM;
size = FDS_BYTES(n);
bits = kmalloc(6 * size, GFP_KERNEL);
if (!bits)
goto out_nofds;
......@@ -1102,7 +1119,7 @@ struct shmid_ds32 {
};
struct shmid64_ds32 {
struct ipc64_perm shm_perm;
struct ipc64_perm32 shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
unsigned int __unused1;
......@@ -1320,7 +1337,6 @@ static int
msgctl32 (int first, int second, void *uptr)
{
int err = -EINVAL, err2;
struct msqid_ds m;
struct msqid64_ds m64;
struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
......@@ -1336,21 +1352,21 @@ msgctl32 (int first, int second, void *uptr)
case IPC_SET:
if (version == IPC_64) {
err = get_user(m.msg_perm.uid, &up64->msg_perm.uid);
err |= get_user(m.msg_perm.gid, &up64->msg_perm.gid);
err |= get_user(m.msg_perm.mode, &up64->msg_perm.mode);
err |= get_user(m.msg_qbytes, &up64->msg_qbytes);
err = get_user(m64.msg_perm.uid, &up64->msg_perm.uid);
err |= get_user(m64.msg_perm.gid, &up64->msg_perm.gid);
err |= get_user(m64.msg_perm.mode, &up64->msg_perm.mode);
err |= get_user(m64.msg_qbytes, &up64->msg_qbytes);
} else {
err = get_user(m.msg_perm.uid, &up32->msg_perm.uid);
err |= get_user(m.msg_perm.gid, &up32->msg_perm.gid);
err |= get_user(m.msg_perm.mode, &up32->msg_perm.mode);
err |= get_user(m.msg_qbytes, &up32->msg_qbytes);
err = get_user(m64.msg_perm.uid, &up32->msg_perm.uid);
err |= get_user(m64.msg_perm.gid, &up32->msg_perm.gid);
err |= get_user(m64.msg_perm.mode, &up32->msg_perm.mode);
err |= get_user(m64.msg_qbytes, &up32->msg_qbytes);
}
if (err)
break;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_msgctl(first, second, &m);
err = sys_msgctl(first, second, &m64);
set_fs(old_fs);
break;
......@@ -1430,7 +1446,7 @@ static int
shmctl32 (int first, int second, void *uptr)
{
int err = -EFAULT, err2;
struct shmid_ds s;
struct shmid64_ds s64;
struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
......@@ -1482,19 +1498,19 @@ shmctl32 (int first, int second, void *uptr)
case IPC_SET:
if (version == IPC_64) {
err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
err = get_user(s64.shm_perm.uid, &up64->shm_perm.uid);
err |= get_user(s64.shm_perm.gid, &up64->shm_perm.gid);
err |= get_user(s64.shm_perm.mode, &up64->shm_perm.mode);
} else {
err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
err = get_user(s64.shm_perm.uid, &up32->shm_perm.uid);
err |= get_user(s64.shm_perm.gid, &up32->shm_perm.gid);
err |= get_user(s64.shm_perm.mode, &up32->shm_perm.mode);
}
if (err)
break;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_shmctl(first, second, &s);
err = sys_shmctl(first, second, &s64);
set_fs(old_fs);
break;
......@@ -1798,12 +1814,16 @@ put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
ia64f2ia32f(f, &ptp->f9);
break;
case 2:
ia64f2ia32f(f, &ptp->f10);
break;
case 3:
ia64f2ia32f(f, &ptp->f11);
break;
case 4:
case 5:
case 6:
case 7:
ia64f2ia32f(f, &swp->f10 + (regno - 2));
ia64f2ia32f(f, &swp->f12 + (regno - 4));
break;
}
copy_to_user(reg, f, sizeof(*reg));
......@@ -1824,12 +1844,16 @@ get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
copy_from_user(&ptp->f9, reg, sizeof(*reg));
break;
case 2:
copy_from_user(&ptp->f10, reg, sizeof(*reg));
break;
case 3:
copy_from_user(&ptp->f11, reg, sizeof(*reg));
break;
case 4:
case 5:
case 6:
case 7:
copy_from_user(&swp->f10 + (regno - 2), reg, sizeof(*reg));
copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
break;
}
return;
......@@ -1860,7 +1884,7 @@ save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
ptp = ia64_task_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
put_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
put_fpreg(i, &save->st_space[i], ptp, swp, tos);
return 0;
}
......@@ -1893,7 +1917,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *sav
ptp = ia64_task_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
get_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
get_fpreg(i, &save->st_space[i], ptp, swp, tos);
return 0;
}
......
......@@ -4,12 +4,11 @@
extra-y := head.o init_task.o
obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o \
ivt.o machvec.o pal.o perfmon.o process.o ptrace.o sal.o semaphore.o setup.o signal.o \
sys_ia64.o time.o traps.o unaligned.o unwind.o
obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o unwind.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_FSYS) += fsys.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
......@@ -18,3 +17,20 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
AFLAGS_gate.lds.o += -P -C -U$(ARCH)
arch/ia64/kernel/gate.lds.s: %.s: %.S scripts FORCE
$(call if_changed_dep,as_s_S)
$(obj)/gate.so: $(src)/gate.lds.s $(obj)/gate.o
$(CC) -nostdlib -shared -s -Wl,-soname=linux-gate.so.1 \
-o $@ -Wl,-T,$^
extra-y += gate.so
$(obj)/built-in.o: $(obj)/gate-syms.o
$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
$(obj)/gate-syms.o: $(src)/gate.lds.s $(obj)/gate.o
$(CC) -nostdlib -r -o $@ -Wl,-T,$^
extra-y += gate-syms.o
$(obj)/gate-data.o: $(obj)/gate.so
......@@ -84,7 +84,6 @@ hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
acpi_status status;
u8 *data;
u32 length;
int i;
status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
......
......@@ -96,6 +96,9 @@ acpi_get_sysname (void)
if (!strcmp(hdr->oem_id, "HP")) {
return "hpzx1";
}
else if (!strcmp(hdr->oem_id, "SGI")) {
return "sn2";
}
return "dig";
#else
......@@ -103,8 +106,6 @@ acpi_get_sysname (void)
return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
# elif defined (CONFIG_IA64_SGI_SN1)
return "sn1";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_DIG)
......@@ -191,21 +192,19 @@ acpi_parse_lsapic (acpi_table_entry_header *header)
printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
if (lsapic->flags.enabled) {
available_cpus++;
if (!lsapic->flags.enabled)
printk(" disabled");
else if (available_cpus >= NR_CPUS)
printk(" ignored (increase NR_CPUS)");
else {
printk(" enabled");
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
if (hard_smp_processor_id()
== (unsigned int) smp_boot_data.cpu_phys_id[total_cpus])
== (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
printk(" (BSP)");
#endif
}
else {
printk(" disabled");
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[total_cpus] = -1;
#endif
++available_cpus;
}
printk("\n");
......@@ -694,11 +693,11 @@ acpi_boot_init (void)
#endif
#ifdef CONFIG_SMP
smp_boot_data.cpu_count = available_cpus;
if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = total_cpus;
smp_build_cpu_map();
# ifdef CONFIG_NUMA
......
......@@ -62,7 +62,7 @@ GLOBAL_ENTRY(efi_call_phys)
mov b6=r2
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
br.call.sptk.many rp=ia64_switch_mode
br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov out4=in5
mov out0=in1
mov out1=in2
......@@ -73,7 +73,7 @@ GLOBAL_ENTRY(efi_call_phys)
br.call.sptk.many rp=b6 // call the EFI function
.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3
br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2: mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
mov rp=loc0
......
......@@ -4,8 +4,9 @@
* Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these!
*/
#define pKStk p2 /* will leave_kernel return to kernel-stacks? */
#define pUStk p3 /* will leave_kernel return to user-stacks? */
#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0*/
#define pKStk p2 /* will leave_{kernel,syscall} return to kernel-stacks? */
#define pUStk p3 /* will leave_{kernel,syscall} return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
#define pNonSys p5 /* complement of pSys */
......
.section .data.gate, "ax"
.incbin "arch/ia64/kernel/gate.so"
......@@ -6,20 +6,47 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/offsets.h>
#include <asm/sigcontext.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/page.h>
.section .text.gate, "ax"
.start_gate:
/*
* We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
* complications with the linker (which likes to create PLT stubs for branches
* to targets outside the shared object) and to avoid multi-phase kernel builds, we
* simply create minimalistic "patch lists" in special ELF sections.
*/
.section ".data.patch.fsyscall_table", "a"
.previous
#define LOAD_FSYSCALL_TABLE(reg) \
[1:] movl reg=0; \
.xdata4 ".data.patch.fsyscall_table", 1b-.
#ifdef CONFIG_FSYS
.section ".data.patch.brl_fsys_bubble_down", "a"
.previous
#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
[1:](pr)brl.cond.sptk 0; \
.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
#include <asm/errno.h>
GLOBAL_ENTRY(__kernel_syscall_via_break)
.prologue
.altrp b6
.body
/*
* Note: for (fast) syscall restart to work, the break instruction must be
* the first one in the bundle addressed by syscall_via_break.
*/
{ .mib
break 0x100000
nop.i 0
br.ret.sptk.many b6
}
END(__kernel_syscall_via_break)
/*
* On entry:
......@@ -34,7 +61,8 @@
* all other "scratch" registers: undefined
* all "preserved" registers: same as on entry
*/
GLOBAL_ENTRY(syscall_via_epc)
GLOBAL_ENTRY(__kernel_syscall_via_epc)
.prologue
.altrp b6
.body
......@@ -49,52 +77,50 @@ GLOBAL_ENTRY(syscall_via_epc)
epc
}
;;
rsm psr.be
movl r18=fsyscall_table
rsm psr.be // note: on McKinley "rsm psr.be/srlz.d" is slightly faster than "rum psr.be"
LOAD_FSYSCALL_TABLE(r14)
mov r16=IA64_KR(CURRENT)
mov r19=255
mov r16=IA64_KR(CURRENT) // 12 cycle read latency
mov r19=NR_syscalls-1
;;
shladd r18=r17,3,r18
cmp.geu p6,p0=r19,r17 // (syscall > 0 && syscall <= 1024+255)?
shladd r18=r17,3,r14
srlz.d
cmp.ne p8,p0=r0,r0 // p8 <- FALSE
/* Note: if r17 is a NaT, p6 will be set to zero. */
cmp.geu p6,p7=r19,r17 // (syscall > 0 && syscall < 1024+NR_syscalls)?
;;
srlz.d // ensure little-endian byteorder is in effect
(p6) ld8 r18=[r18]
mov r29=psr // read psr (12 cyc load latency)
add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
;;
(p6) mov b7=r18
(p6) tbit.z p8,p0=r18,0
(p8) br.dptk.many b7
mov r27=ar.rsc
mov r21=ar.fpsr
mov r26=ar.pfs
/*
* brl.cond doesn't work as intended because the linker would convert this branch
* into a branch to a PLT. Perhaps there will be a way to avoid this with some
* future version of the linker. In the meantime, we just use an indirect branch
* instead.
*/
#ifdef CONFIG_ITANIUM
(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
;;
(p6) mov b7=r14
(p6) br.sptk.many b7
#else
BRL_COND_FSYS_BUBBLE_DOWN(p6)
#endif
mov r10=-1
mov r8=ENOSYS
MCKINLEY_E9_WORKAROUND
br.ret.sptk.many b6
END(syscall_via_epc)
GLOBAL_ENTRY(syscall_via_break)
.prologue
.altrp b6
.body
break 0x100000
br.ret.sptk.many b6
END(syscall_via_break)
GLOBAL_ENTRY(fsys_fallback_syscall)
/*
* It would be better/fsyser to do the SAVE_MIN magic directly here, but for now
* we simply fall back on doing a system-call via break. Good enough
* to get started. (Note: we have to do this through the gate page again, since
* the br.ret will switch us back to user-level privilege.)
*
* XXX Move this back to fsys.S after changing it over to avoid break 0x100000.
*/
movl r2=(syscall_via_break - .start_gate) + GATE_ADDR
;;
MCKINLEY_E9_WORKAROUND
mov b7=r2
br.ret.sptk.many b7
END(fsys_fallback_syscall)
#endif /* CONFIG_FSYS */
END(__kernel_syscall_via_epc)
# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
......@@ -153,7 +179,7 @@ END(fsys_fallback_syscall)
.savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \
.vframesp SP_OFF+SIGCONTEXT_OFF
GLOBAL_ENTRY(ia64_sigtramp)
GLOBAL_ENTRY(__kernel_sigtramp)
// describe the state that is active when we get here:
.prologue
SIGTRAMP_SAVES
......@@ -335,4 +361,4 @@ restore_rbs:
mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
// invala not necessary as that will happen when returning to user-mode
br.cond.sptk back_from_restore_rbs
END(ia64_sigtramp)
END(__kernel_sigtramp)
/*
* Linker script for gate DSO. The gate pages are an ELF shared object prelinked to its
* virtual address, with only one read-only segment and one execute-only segment (both fit
* in one page). This script controls its layout.
*/
#include <linux/config.h>
#include <asm/system.h>
SECTIONS
{
. = GATE_ADDR + SIZEOF_HEADERS;
.hash : { *(.hash) } :readable
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.dynamic : { *(.dynamic) } :readable :dynamic
/*
* This linker script is used both with -r and with -shared. For the layouts to match,
* we need to skip more than enough space for the dynamic symbol table et al. If this
* amount is insufficient, ld -shared will barf. Just increase it here.
*/
. = GATE_ADDR + 0x500;
.data.patch : {
__start_gate_mckinley_e9_patchlist = .;
*(.data.patch.mckinley_e9)
__end_gate_mckinley_e9_patchlist = .;
__start_gate_vtop_patchlist = .;
*(.data.patch.vtop)
__end_gate_vtop_patchlist = .;
__start_gate_fsyscall_patchlist = .;
*(.data.patch.fsyscall_table)
__end_gate_fsyscall_patchlist = .;
__start_gate_brl_fsys_bubble_down_patchlist = .;
*(.data.patch.brl_fsys_bubble_down)
__end_gate_brl_fsys_bubble_down_patchlist = .;
} :readable
.IA_64.unwind_info : { *(.IA_64.unwind_info*) }
.IA_64.unwind : { *(.IA_64.unwind*) } :readable :unwind
#ifdef HAVE_BUGGY_SEGREL
.text (GATE_ADDR + PAGE_SIZE) : { *(.text) *(.text.*) } :readable
#else
. = ALIGN (PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1));
.text : { *(.text) *(.text.*) } :epc
#endif
/DISCARD/ : {
*(.got.plt) *(.got)
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(__ex_table)
}
}
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
readable PT_LOAD FILEHDR PHDRS FLAGS(4); /* PF_R */
#ifndef HAVE_BUGGY_SEGREL
epc PT_LOAD FILEHDR PHDRS FLAGS(1); /* PF_X */
#endif
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
LINUX_2.5 {
global:
__kernel_syscall_via_break;
__kernel_syscall_via_epc;
__kernel_sigtramp;
local: *;
};
}
/* The ELF entry point can be used to set the AT_SYSINFO value. */
ENTRY(__kernel_syscall_via_epc)
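
As a hedged userland illustration of that last comment (using getauxval(), a later glibc convenience that did not exist when this commit was written), a program could locate the gate's entry point through the AT_SYSINFO auxiliary-vector entry:

    #include <elf.h>            /* AT_SYSINFO */
    #include <stdio.h>
    #include <sys/auxv.h>       /* getauxval() */

    int main (void)
    {
            unsigned long entry = getauxval(AT_SYSINFO);

            if (entry)
                    printf("kernel gate entry point: %#lx\n", entry);
            else
                    printf("AT_SYSINFO not provided by this kernel\n");
            return 0;
    }
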
......@@ -60,22 +60,42 @@ start_ap:
mov r4=r0
.body
/*
* Initialize the region register for region 7 and install a translation register
* that maps the kernel's text and data:
*/
rsm psr.i | psr.ic
mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (IA64_GRANULE_SHIFT << 2))
;;
srlz.i
;;
/*
* Initialize kernel region registers:
* rr[5]: VHPT enabled, page size = PAGE_SHIFT
* rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
* rr[5]: VHPT disabled, page size = IA64_GRANULE_SHIFT
*/
mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
movl r17=(5<<61)
mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
movl r19=(6<<61)
mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
movl r21=(7<<61)
;;
mov rr[r17]=r16
mov rr[r19]=r18
mov rr[r21]=r20
;;
/*
* Now pin mappings into the TLB for kernel text and data
*/
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
mov rr[r17]=r16
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
movl r18=((1 << KERNEL_TR_PAGE_SHIFT) | PAGE_KERNEL)
mov r3=ip
movl r18=PAGE_KERNEL
;;
dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
;;
or r18=r2,r18
;;
srlz.i
;;
......@@ -113,16 +133,6 @@ start_ap:
mov ar.fpsr=r2
;;
#ifdef CONFIG_IA64_EARLY_PRINTK
mov r3=(6<<8) | (IA64_GRANULE_SHIFT<<2)
movl r2=6<<61
;;
mov rr[r2]=r3
;;
srlz.i
;;
#endif
#define isAP p2 // are we an Application Processor?
#define isBP p3 // are we the Bootstrap Processor?
......@@ -143,12 +153,36 @@ start_ap:
movl r2=init_thread_union
cmp.eq isBP,isAP=r0,r0
#endif
mov r16=KERNEL_TR_PAGE_NUM
;;
tpa r3=r2 // r3 == phys addr of task struct
// load mapping for stack (virtaddr in r2, physaddr in r3)
rsm psr.ic
movl r17=PAGE_KERNEL
;;
srlz.d
dep r18=0,r3,0,12
;;
or r18=r17,r18
dep r2=-1,r3,61,3 // IMVA of task
;;
mov r17=rr[r2]
shr.u r16=r3,IA64_GRANULE_SHIFT
;;
dep r17=0,r17,8,24
;;
mov cr.itir=r17
mov cr.ifa=r2
mov r19=IA64_TR_CURRENT_STACK
;;
itr.d dtr[r19]=r18
;;
ssm psr.ic
srlz.d
;;
// load the "current" pointer (r13) and ar.k6 with the current task
mov IA64_KR(CURRENT)=r2 // virtual address
// initialize k4 to a safe value (64-128MB is mapped by TR_KERNEL)
mov IA64_KR(CURRENT_STACK)=r16
mov r13=r2
/*
......@@ -665,14 +699,14 @@ GLOBAL_ENTRY(__ia64_init_fpu)
END(__ia64_init_fpu)
/*
* Switch execution mode from virtual to physical or vice versa.
* Switch execution mode from virtual to physical
*
* Inputs:
* r16 = new psr to establish
*
* Note: RSE must already be in enforced lazy mode
*/
GLOBAL_ENTRY(ia64_switch_mode)
GLOBAL_ENTRY(ia64_switch_mode_phys)
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
......@@ -682,35 +716,86 @@ GLOBAL_ENTRY(ia64_switch_mode)
{
flushrs // must be first insn in group
srlz.i
shr.u r19=r15,61 // r19 <- top 3 bits of current IP
}
;;
mov cr.ipsr=r16 // set new PSR
add r3=1f-ia64_switch_mode,r15
xor r15=0x7,r19 // flip the region bits
add r3=1f-ia64_switch_mode_phys,r15
mov r17=ar.bsp
mov r14=rp // get return address into a general register
;;
// switch RSE backing store:
// going to physical mode, use tpa to translate virt->phys
tpa r17=r17
tpa r3=r3
tpa sp=sp
tpa r14=r14
;;
dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
mov r18=ar.rnat // save ar.rnat
;;
mov ar.bspstore=r17 // this steps on ar.rnat
dep r3=r15,r3,61,3 // make rfi return address physical or virtual
mov cr.iip=r3
mov cr.ifs=r0
;;
mov ar.rnat=r18 // restore ar.rnat
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.many rp
END(ia64_switch_mode_phys)
/*
* Switch execution mode from physical to virtual
*
* Inputs:
* r16 = new psr to establish
*
* Note: RSE must already be in enforced lazy mode
*/
GLOBAL_ENTRY(ia64_switch_mode_virt)
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
;;
{
flushrs // must be first insn in group
srlz.i
}
;;
mov cr.ipsr=r16 // set new PSR
add r3=1f-ia64_switch_mode_virt,r15
mov r17=ar.bsp
mov r14=rp // get return address into a general register
;;
// going to virtual
// - for code addresses, set upper bits of addr to KERNEL_START
// - for stack addresses, set upper 3 bits to 0xe.... Dont change any of the
// lower bits since we want it to stay identity mapped
movl r18=KERNEL_START
dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
dep r17=-1,r17,61,3
dep sp=-1,sp,61,3
;;
or r3=r3,r18
or r14=r14,r18
;;
mov r18=ar.rnat // save ar.rnat
mov ar.bspstore=r17 // this steps on ar.rnat
mov cr.iip=r3
mov cr.ifs=r0
dep sp=r15,sp,61,3 // make stack pointer physical or virtual
;;
mov ar.rnat=r18 // restore ar.rnat
dep r14=r15,r14,61,3 // make function return address physical or virtual
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.many rp
END(ia64_switch_mode)
END(ia64_switch_mode_virt)
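
For reference, a minimal C sketch of the address arithmetic the two routines above perform (illustrative names; assumes the kernel's identity-mapped window lives in region 7):

    #include <stdint.h>

    #define REGION_7  (0x7UL << 61)   /* top three address bits of the identity map */

    /* going virtual: "dep reg=-1,reg,61,3" forces the region bits to 7 */
    static inline uint64_t phys_to_region7 (uint64_t pa)
    {
            return pa | REGION_7;
    }

    /* going physical: strip the region bits, leaving the identity-mapped offset */
    static inline uint64_t region7_to_phys (uint64_t va)
    {
            return va & ~REGION_7;
    }
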
#ifdef CONFIG_IA64_BRL_EMU
......@@ -753,7 +838,7 @@ SET_REG(b5);
* r29 - available for use.
* r30 - available for use.
* r31 - address of lock, available for use.
* b7 - return address
* b6 - return address
* p14 - available for use.
*
* If you patch this code to use more registers, do not forget to update
......
......@@ -65,6 +65,9 @@ EXPORT_SYMBOL(ia64_pfn_valid);
#include <asm/processor.h>
EXPORT_SYMBOL(cpu_info__per_cpu);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__per_cpu_offset);
#endif
EXPORT_SYMBOL(kernel_thread);
#include <asm/system.h>
......@@ -88,6 +91,7 @@ EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_single);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(ia64_cpu_to_sapicid);
#else /* !CONFIG_SMP */
......@@ -124,6 +128,18 @@ EXPORT_SYMBOL_NOVERS(__udivdi3);
EXPORT_SYMBOL_NOVERS(__moddi3);
EXPORT_SYMBOL_NOVERS(__umoddi3);
#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
extern void xor_ia64_5(void);
EXPORT_SYMBOL_NOVERS(xor_ia64_2);
EXPORT_SYMBOL_NOVERS(xor_ia64_3);
EXPORT_SYMBOL_NOVERS(xor_ia64_4);
EXPORT_SYMBOL_NOVERS(xor_ia64_5);
#endif
extern unsigned long ia64_iobase;
EXPORT_SYMBOL(ia64_iobase);
......@@ -147,10 +163,15 @@ EXPORT_SYMBOL(efi_dir);
EXPORT_SYMBOL(ia64_mv);
#endif
EXPORT_SYMBOL(machvec_noop);
EXPORT_SYMBOL(machvec_memory_fence);
EXPORT_SYMBOL(zero_page_memmap_ptr);
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
EXPORT_SYMBOL(pfm_register_buffer_fmt);
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
EXPORT_SYMBOL(pfm_mod_read_pmds);
EXPORT_SYMBOL(pfm_mod_write_pmcs);
#endif
#ifdef CONFIG_NUMA
......@@ -169,10 +190,25 @@ EXPORT_SYMBOL(unw_access_fr);
EXPORT_SYMBOL(unw_access_ar);
EXPORT_SYMBOL(unw_access_pr);
#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
extern void ia64_spinlock_contention_pre3_4 (void);
#ifdef CONFIG_SMP
# if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
/*
* This is not a normal routine and we don't want a function descriptor for it, so we use
* a fake declaration here.
*/
extern char ia64_spinlock_contention_pre3_4;
EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
#else
extern void ia64_spinlock_contention (void);
# else
/*
* This is not a normal routine and we don't want a function descriptor for it, so we use
* a fake declaration here.
*/
extern char ia64_spinlock_contention;
EXPORT_SYMBOL(ia64_spinlock_contention);
# endif
#endif
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
#include <linux/pm.h>
EXPORT_SYMBOL(pm_idle);
......@@ -36,7 +36,7 @@ union init_thread {
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
} init_thread_union __attribute__((section(".data.init_task"))) = {{
.task = INIT_TASK(init_thread_union.s.task),
.thread_info = INIT_THREAD_INFO(init_thread_union.s.thread_info)
.thread_info = INIT_THREAD_INFO(init_thread_union.s.task)
}};
asm (".global init_task; init_task = init_thread_union");
......@@ -322,7 +322,7 @@ fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_sta
}
void
init_handler_platform (sal_log_processor_info_t *proc_ptr,
init_handler_platform (pal_min_state_area_t *ms,
struct pt_regs *pt, struct switch_stack *sw)
{
struct unw_frame_info info;
......@@ -337,15 +337,18 @@ init_handler_platform (sal_log_processor_info_t *proc_ptr,
*/
printk("Delaying for 5 seconds...\n");
udelay(5*1000000);
show_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);
show_min_state(ms);
printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
fetch_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area, pt, sw);
fetch_min_state(ms, pt, sw);
unw_init_from_interruption(&info, current, pt, sw);
ia64_do_show_stack(&info, NULL);
#ifdef CONFIG_SMP
/* read_trylock() would be handy... */
if (!tasklist_lock.write_lock)
read_lock(&tasklist_lock);
#endif
{
struct task_struct *g, *t;
do_each_thread (g, t) {
......@@ -353,11 +356,13 @@ init_handler_platform (sal_log_processor_info_t *proc_ptr,
continue;
printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
show_stack(t);
show_stack(t, NULL);
} while_each_thread (g, t);
}
#ifdef CONFIG_SMP
if (!tasklist_lock.write_lock)
read_unlock(&tasklist_lock);
#endif
printk("\nINIT dump complete. Please reboot now.\n");
while (1); /* hang city if no debugger */
......@@ -657,17 +662,17 @@ ia64_mca_init(void)
IA64_MCA_DEBUG("ia64_mca_init: registered mca rendezvous spinloop and wakeup mech.\n");
ia64_mc_info.imi_mca_handler = __pa(mca_hldlr_ptr->fp);
ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
/*
* XXX - disable SAL checksum by setting size to 0; should be
* __pa(ia64_os_mca_dispatch_end) - __pa(ia64_os_mca_dispatch);
* ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
*/
ia64_mc_info.imi_mca_handler_size = 0;
/* Register the os mca handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
ia64_mc_info.imi_mca_handler,
mca_hldlr_ptr->gp,
ia64_tpa(mca_hldlr_ptr->gp),
ia64_mc_info.imi_mca_handler_size,
0, 0, 0)))
{
......@@ -677,15 +682,15 @@ ia64_mca_init(void)
}
IA64_MCA_DEBUG("ia64_mca_init: registered os mca handler with SAL at 0x%lx, gp = 0x%lx\n",
ia64_mc_info.imi_mca_handler, mca_hldlr_ptr->gp);
ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
/*
* XXX - disable SAL checksum by setting size to 0, should be
* IA64_INIT_HANDLER_SIZE
*/
ia64_mc_info.imi_monarch_init_handler = __pa(mon_init_ptr->fp);
ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
ia64_mc_info.imi_monarch_init_handler_size = 0;
ia64_mc_info.imi_slave_init_handler = __pa(slave_init_ptr->fp);
ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
ia64_mc_info.imi_slave_init_handler_size = 0;
IA64_MCA_DEBUG("ia64_mca_init: os init handler at %lx\n",
......@@ -694,10 +699,10 @@ ia64_mca_init(void)
/* Register the os init handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
__pa(ia64_get_gp()),
ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_monarch_init_handler_size,
ia64_mc_info.imi_slave_init_handler,
__pa(ia64_get_gp()),
ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_slave_init_handler_size)))
{
printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
......@@ -1235,32 +1240,19 @@ device_initcall(ia64_mca_late_init);
void
ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
{
sal_log_processor_info_t *proc_ptr;
ia64_err_rec_t *plog_ptr;
pal_min_state_area_t *ms;
printk(KERN_INFO "Entered OS INIT handler\n");
/* Get the INIT processor log */
if (!ia64_log_get(SAL_INFO_TYPE_INIT, (prfunc_t)printk))
return; // no record retrieved
#ifdef IA64_DUMP_ALL_PROC_INFO
ia64_log_print(SAL_INFO_TYPE_INIT, (prfunc_t)printk);
#endif
printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
ia64_sal_to_os_handoff_state.proc_state_param);
/*
* get pointer to min state save area
*
* Address of minstate area provided by PAL is physical,
* uncacheable (bit 63 set). Convert to Linux virtual
* address in region 6.
*/
plog_ptr=(ia64_err_rec_t *)IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_INIT);
proc_ptr = &plog_ptr->proc_err;
ia64_process_min_state_save(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);
/* Clear the INIT SAL logs now that they have been saved in the OS buffer */
ia64_sal_clear_state_info(SAL_INFO_TYPE_INIT);
ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
init_handler_platform(proc_ptr, pt, sw); /* call platform specific routines */
init_handler_platform(ms, pt, sw); /* call platform specific routines */
}
/*
......
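
Several hunks above switch from __pa() to ia64_tpa() when handing handler addresses to SAL. A hedged sketch of what ia64_tpa() boils down to (the real definition lives in the asm-ia64 headers and may differ in detail): the tpa instruction asks the MMU to translate the address, so it also works for text mapped by translation registers rather than only the identity-mapped region that __pa() assumes.

    static inline unsigned long
    ia64_tpa_sketch (unsigned long va)
    {
            unsigned long pa;

            asm volatile ("tpa %0 = %1" : "=r" (pa) : "r" (va));
            return pa;
    }
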
......@@ -50,14 +50,15 @@
* 6. GR12 = Return address to location within SAL_CHECK
*/
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
movl _tmp=ia64_sal_to_os_handoff_state;; \
DATA_VA_TO_PA(_tmp);; \
LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
st8 [_tmp]=r1,0x08;; \
st8 [_tmp]=r8,0x08;; \
st8 [_tmp]=r9,0x08;; \
st8 [_tmp]=r10,0x08;; \
st8 [_tmp]=r11,0x08;; \
st8 [_tmp]=r12,0x08
st8 [_tmp]=r12,0x08;; \
st8 [_tmp]=r17,0x08;; \
st8 [_tmp]=r18,0x08
/*
* OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
......@@ -70,9 +71,8 @@
* returns ptr to SAL rtn save loc in _tmp
*/
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
(p6) movl _tmp=ia64_sal_to_os_handoff_state;; \
(p7) movl _tmp=ia64_os_to_sal_handoff_state;; \
DATA_VA_TO_PA(_tmp);; \
LOAD_PHYSICAL(p6, _tmp, ia64_sal_to_os_handoff_state);; \
LOAD_PHYSICAL(p7, _tmp, ia64_os_to_sal_handoff_state);; \
(p6) movl r8=IA64_MCA_COLD_BOOT; \
(p6) movl r10=IA64_MCA_SAME_CONTEXT; \
(p6) add _tmp=0x18,_tmp;; \
......
......@@ -18,7 +18,8 @@
LTOFF22X
LTOFF22X
LTOFF_FPTR22
PCREL21B
PCREL21B (for br.call only; br.cond is not supported out of modules!)
PCREL60B (for brl.cond only; brl.call is not supported for modules!)
PCREL64LSB
SECREL32LSB
SEGREL64LSB
......@@ -33,6 +34,7 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/patch.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0
......@@ -158,27 +160,6 @@ slot (const struct insn *insn)
return (uint64_t) insn & 0x3;
}
/* Patch instruction with "val" where "mask" has 1 bits. */
static void
apply (struct insn *insn, uint64_t mask, uint64_t val)
{
uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) bundle(insn);
# define insn_mask ((1UL << 41) - 1)
unsigned long shift;
b0 = b[0]; b1 = b[1];
shift = 5 + 41 * slot(insn); /* 5 bits of template, then 3 x 41-bit instructions */
if (shift >= 64) {
m1 = mask << (shift - 64);
v1 = val << (shift - 64);
} else {
m0 = mask << shift; m1 = mask >> (64 - shift);
v0 = val << shift; v1 = val >> (64 - shift);
b[0] = (b0 & ~m0) | (v0 & m0);
}
b[1] = (b1 & ~m1) | (v1 & m1);
}
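The removed helper relied on the IA-64 bundle layout: 128 bits split into a 5-bit template followed by three 41-bit instruction slots, so slot n starts at bit 5 + 41*n. A purely illustrative sketch of that arithmetic (the shared ia64_patch() helper from <asm/patch.h> now centralizes the actual patching):
/* Illustration only: bit offset of instruction slot n within a bundle. */
static unsigned long slot_bit_offset(unsigned int n)	/* n = 0..2 */
{
	return 5 + 41 * n;	/* slot 0 -> bit 5, slot 1 -> bit 46, slot 2 -> bit 87 */
}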
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
......@@ -187,12 +168,7 @@ apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
mod->name, slot(insn));
return 0;
}
apply(insn, 0x01fffefe000, ( ((val & 0x8000000000000000) >> 27) /* bit 63 -> 36 */
| ((val & 0x0000000000200000) << 0) /* bit 21 -> 21 */
| ((val & 0x00000000001f0000) << 6) /* bit 16 -> 22 */
| ((val & 0x000000000000ff80) << 20) /* bit 7 -> 27 */
| ((val & 0x000000000000007f) << 13) /* bit 0 -> 13 */));
apply((void *) insn - 1, 0x1ffffffffff, val >> 22);
ia64_patch_imm64((u64) insn, val);
return 1;
}
......@@ -208,9 +184,7 @@ apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
return 0;
}
apply(insn, 0x011ffffe000, ( ((val & 0x1000000000000000) >> 24) /* bit 60 -> 36 */
| ((val & 0x00000000000fffff) << 13) /* bit 0 -> 13 */));
apply((void *) insn - 1, 0x1fffffffffc, val >> 18);
ia64_patch_imm60((u64) insn, val);
return 1;
}
......@@ -221,10 +195,10 @@ apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
return 0;
}
apply(insn, 0x01fffcfe000, ( ((val & 0x200000) << 15) /* bit 21 -> 36 */
| ((val & 0x1f0000) << 6) /* bit 16 -> 22 */
| ((val & 0x00ff80) << 20) /* bit 7 -> 27 */
| ((val & 0x00007f) << 13) /* bit 0 -> 13 */));
ia64_patch((u64) insn, 0x01fffcfe000, ( ((val & 0x200000) << 15) /* bit 21 -> 36 */
| ((val & 0x1f0000) << 6) /* bit 16 -> 22 */
| ((val & 0x00ff80) << 20) /* bit 7 -> 27 */
| ((val & 0x00007f) << 13) /* bit 0 -> 13 */));
return 1;
}
......@@ -235,8 +209,8 @@ apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
return 0;
}
apply(insn, 0x11ffffe000, ( ((val & 0x100000) << 16) /* bit 20 -> 36 */
| ((val & 0x0fffff) << 13) /* bit 0 -> 13 */));
ia64_patch((u64) insn, 0x11ffffe000, ( ((val & 0x100000) << 16) /* bit 20 -> 36 */
| ((val & 0x0fffff) << 13) /* bit 0 -> 13 */));
return 1;
}
......@@ -281,7 +255,7 @@ plt_target (struct plt_entry *plt)
b0 = b[0]; b1 = b[1];
off = ( ((b1 & 0x00fffff000000000) >> 36) /* imm20b -> bit 0 */
| ((b0 >> 48) << 20) | ((b1 & 0x7fffff) << 36) /* imm39 -> bit 20 */
| ((b1 & 0x0800000000000000) << 1)); /* i -> bit 60 */
| ((b1 & 0x0800000000000000) << 0)); /* i -> bit 59 */
return (long) plt->bundle[1] + 16*off;
}
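A hedged sketch of the arithmetic behind the corrected shift above: the brl displacement is a signed 60-bit bundle count assembled as i:imm39:imm20b, so the i bit lands at bit 59 (the sign bit) of the immediate, not bit 60, and the byte displacement is 16 times that count (brl_disp_bytes is illustrative only):
#include <stdint.h>
static int64_t brl_disp_bytes(uint64_t imm20b, uint64_t imm39, uint64_t i)
{
	uint64_t imm60 = imm20b | (imm39 << 20) | (i << 59);	/* i is the sign bit */
	int64_t bundles = ((int64_t) (imm60 << 4)) >> 4;	/* sign-extend 60 -> 64 bits */
	return bundles * 16;	/* displacements count 16-byte bundles */
}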
......@@ -751,7 +725,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
if (gp_addressable(mod, val)) {
/* turn "ld8" into "mov": */
DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
apply(location, 0x1fff80fe000, 0x10000000000);
ia64_patch((u64) location, 0x1fff80fe000, 0x10000000000);
}
return 0;
......
......@@ -164,7 +164,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
dep.z r8=r8,0,61 // convert rp to physical
tpa r8=r8 // convert rp to physical
;;
mov b7 = loc2 // install target to branch reg
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
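A likely rationale for the dep.z -> tpa change above, sketched in C under that assumption (region_strip is hypothetical): dep.z r8=r8,0,61 merely clears the region bits, which recovers the physical address only for identity-mapped kernel (region 7) addresses, whereas tpa asks the TLB for the real translation and therefore also works when rp points elsewhere, e.g. into module text:
/* What dep.z r8=r8,0,61 computed: keep bits 0..60, zero the region bits. */
static unsigned long region_strip(unsigned long vaddr)
{
	return vaddr & ((1ul << 61) - 1);
}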
......@@ -174,13 +174,13 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
or loc3=loc3,r17 // add in psr the bits to set
;;
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode
br.call.sptk.many rp=ia64_switch_mode_phys
.ret1: mov rp = r8 // install return address (physical)
br.cond.sptk.many b7
1:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2:
mov psr.l = loc3 // restore init PSR
......@@ -228,13 +228,13 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
mov b7 = loc2 // install target to branch reg
;;
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode
br.call.sptk.many rp=ia64_switch_mode_phys
.ret6:
br.call.sptk.many rp=b7 // now make the call
.ret7:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret8: mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
......
/*
* This file contains the architected PMU register description tables
* This file contains the generic PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#if defined(CONFIG_ITANIUM) || defined (CONFIG_MCKINLEY)
#error "This file should not be used when CONFIG_ITANIUM or CONFIG_MCKINLEY is defined"
#endif
static pfm_reg_desc_t pmc_gen_desc[PMU_MAX_PMCS]={
static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
......@@ -23,7 +25,7 @@ static pfm_reg_desc_t pmc_gen_desc[PMU_MAX_PMCS]={
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_gen_desc[PMU_MAX_PMDS]={
static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
......@@ -39,10 +41,13 @@ static pfm_reg_desc_t pmd_gen_desc[PMU_MAX_PMDS]={
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.disabled = 1,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 8,
.num_dbrs = 8,
.pmd_desc = pfm_gen_pmd_desc,
.pmc_desc = pfm_gen_pmc_desc
.pmu_name = "Generic",
.pmu_family = 0xff, /* any */
.enabled = 0,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 0, /* does not use */
.num_dbrs = 0, /* does not use */
.pmd_desc = pfm_gen_pmd_desc,
.pmc_desc = pfm_gen_pmc_desc
};
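As a brief, hedged note on the remaining fields (assuming, as ovfl_val suggests, that the generic PMU description models 32-bit counters): pmu_family 0xff acts as the "any family" wildcard, and the overflow mask is simply the largest value a 32-bit counter holds before wrapping:
#include <stdio.h>
int main(void)
{
	unsigned long ovfl_val = (1UL << 32) - 1;	/* 0xffffffff: 32 implemented counter bits */
	printf("generic PMU counter mask = %#lx\n", ovfl_val);
	return 0;
}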
......@@ -2,7 +2,7 @@
* This file contains various system calls that have different calling
* conventions on different platforms.
*
* Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
* Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
......
.rodata
data4 @segrel(start)
.data
start: