Commit c3285854 authored by Linus Torvalds

Import 2.4.0-test2pre11

parent 470d9268
...@@ -14290,7 +14290,7 @@ CONFIG_OBP_FLASH ...@@ -14290,7 +14290,7 @@ CONFIG_OBP_FLASH
The OpenBoot PROM on Ultra systems is flashable. If you want to be The OpenBoot PROM on Ultra systems is flashable. If you want to be
able to upgrade the OBP firmware, say Y here. able to upgrade the OBP firmware, say Y here.
JavaStation OS Flash SIMM (EXPERIMENTAL) JavaStation OS Flash SIMM
CONFIG_SUN_JSFLASH CONFIG_SUN_JSFLASH
If you say Y here, you will be able to boot from your JavaStation's If you say Y here, you will be able to boot from your JavaStation's
Flash memory. Flash memory.
......
...@@ -291,9 +291,12 @@ FILES_FLAGS_CHANGED := $(strip \ ...@@ -291,9 +291,12 @@ FILES_FLAGS_CHANGED := $(strip \
)) ))
# A kludge: .S files don't get flag dependencies (yet), # A kludge: .S files don't get flag dependencies (yet),
# because that will involve changing a lot of Makefiles. # because that will involve changing a lot of Makefiles. Also
# suppress object files explicitly listed in $(IGNORE_FLAGS_OBJS).
# This allows handling of assembly files that get translated into
# multiple object files (see arch/ia64/lib/idiv.S, for example).
FILES_FLAGS_CHANGED := $(strip \ FILES_FLAGS_CHANGED := $(strip \
$(filter-out $(patsubst %.S, %.o, $(wildcard *.S)), \ $(filter-out $(patsubst %.S, %.o, $(wildcard *.S) $(IGNORE_FLAGS_OBJS)), \
$(FILES_FLAGS_CHANGED))) $(FILES_FLAGS_CHANGED)))
ifneq ($(FILES_FLAGS_CHANGED),) ifneq ($(FILES_FLAGS_CHANGED),)
......
...@@ -101,6 +101,10 @@ EXPORT_SYMBOL(strnlen_user); ...@@ -101,6 +101,10 @@ EXPORT_SYMBOL(strnlen_user);
EXPORT_SYMBOL(pci_alloc_consistent); EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent); EXPORT_SYMBOL(pci_free_consistent);
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pcibios_penalize_isa_irq);
#endif
#ifdef CONFIG_X86_USE_3DNOW #ifdef CONFIG_X86_USE_3DNOW
EXPORT_SYMBOL(_mmx_memcpy); EXPORT_SYMBOL(_mmx_memcpy);
EXPORT_SYMBOL(mmx_clear_page); EXPORT_SYMBOL(mmx_clear_page);
......
...@@ -68,4 +68,4 @@ extern unsigned int pcibios_irq_mask; ...@@ -68,4 +68,4 @@ extern unsigned int pcibios_irq_mask;
void pcibios_irq_init(void); void pcibios_irq_init(void);
void pcibios_fixup_irqs(void); void pcibios_fixup_irqs(void);
int pcibios_lookup_irq(struct pci_dev *dev, int assign); void pcibios_enable_irq(struct pci_dev *dev);
...@@ -14,11 +14,11 @@ ...@@ -14,11 +14,11 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/smp.h>
#include <asm/io_apic.h>
#include "pci-i386.h" #include "pci-i386.h"
extern int skip_ioapic_setup;
#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
#define PIRQ_VERSION 0x0100 #define PIRQ_VERSION 0x0100
...@@ -27,13 +27,13 @@ static struct irq_routing_table *pirq_table; ...@@ -27,13 +27,13 @@ static struct irq_routing_table *pirq_table;
/* /*
* Never use: 0, 1, 2 (timer, keyboard, and cascade) * Never use: 0, 1, 2 (timer, keyboard, and cascade)
* Avoid using: 13, 14 and 15 (FP error and IDE). * Avoid using: 13, 14 and 15 (FP error and IDE).
* Penalize: 3, 4, 7, 12 (known ISA uses: serial, parallel and mouse) * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
*/ */
unsigned int pcibios_irq_mask = 0xfff8; unsigned int pcibios_irq_mask = 0xfff8;
static unsigned pirq_penalty[16] = { static int pirq_penalty[16] = {
10000, 10000, 10000, 100, 100, 0, 0, 100, 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
0, 0, 0, 0, 100, 1000, 1000, 1000 0, 0, 0, 0, 1000, 100000, 100000, 100000
}; };
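The penalty table above is only a scoring heuristic: when the router has to pick an IRQ for a PCI pin, it walks the 16 ISA-range IRQs allowed by the routing-table mask and takes the one with the lowest penalty. A minimal userspace sketch of that policy (the numbers are illustrative, this is not the kernel code itself):

#include <stdio.h>

/* Same shape as pirq_penalty[] above; values are illustrative. */
static int penalty[16] = {
	1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
	0, 0, 0, 0, 1000, 100000, 100000, 100000
};

/* Pick the lowest-penalty IRQ among the ones the router allows (bitmask). */
static int pick_irq(unsigned int mask)
{
	int i, best = 0;

	for (i = 0; i < 16; i++) {
		if (!(mask & (1 << i)))
			continue;
		if (penalty[i] < penalty[best])
			best = i;
	}
	return best;
}

int main(void)
{
	/* Say the routing table allows IRQ 3, 5, 10 and 11 for this pin. */
	unsigned int mask = (1 << 3) | (1 << 5) | (1 << 10) | (1 << 11);

	printf("chosen IRQ: %d\n", pick_irq(mask));	/* prints 5 */
	return 0;
}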
struct irq_router { struct irq_router {
...@@ -222,7 +222,7 @@ static struct irq_router pirq_routers[] = { ...@@ -222,7 +222,7 @@ static struct irq_router pirq_routers[] = {
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_0, pirq_piix_get, pirq_piix_set }, { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, pirq_piix_get, pirq_piix_set }, { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, pirq_piix_get, pirq_piix_set }, { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82440MX_1, pirq_piix_get, pirq_piix_set }, { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_0, pirq_piix_get, pirq_piix_set },
{ "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set }, { "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set }, { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set }, { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set },
...@@ -287,7 +287,11 @@ static struct irq_info *pirq_get_info(struct pci_dev *dev, int pin) ...@@ -287,7 +287,11 @@ static struct irq_info *pirq_get_info(struct pci_dev *dev, int pin)
return NULL; return NULL;
} }
int pcibios_lookup_irq(struct pci_dev *dev, int assign) static void pcibios_test_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
}
static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
{ {
struct irq_info *info; struct irq_info *info;
int i, pirq, pin, newirq; int i, pirq, pin, newirq;
...@@ -323,19 +327,24 @@ int pcibios_lookup_irq(struct pci_dev *dev, int assign) ...@@ -323,19 +327,24 @@ int pcibios_lookup_irq(struct pci_dev *dev, int assign)
/* Find the best IRQ to assign */ /* Find the best IRQ to assign */
newirq = 0; newirq = 0;
for (i = 0; i < 16; i++) { if (assign) {
if (!(mask & (1 << i))) for (i = 0; i < 16; i++) {
continue; if (!(mask & (1 << i)))
if (pirq_penalty[i] < pirq_penalty[newirq]) continue;
newirq = i; if (pirq_penalty[i] < pirq_penalty[newirq] &&
!request_irq(i, pcibios_test_irq_handler, SA_SHIRQ, "pci-test", dev)) {
free_irq(i, dev);
newirq = i;
}
}
DBG(" -> newirq=%d", newirq);
} }
DBG(" -> newirq=%d", newirq);
/* Try to get current IRQ */ /* Try to get current IRQ */
if (r->get && (irq = r->get(pirq_router_dev, d, pirq))) { if (r->get && (irq = r->get(pirq_router_dev, d, pirq))) {
DBG(" -> got IRQ %d\n", irq); DBG(" -> got IRQ %d\n", irq);
msg = "Found"; msg = "Found";
} else if (assign && newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
DBG(" -> assigning IRQ %d", newirq); DBG(" -> assigning IRQ %d", newirq);
if (r->set(pirq_router_dev, d, pirq, newirq)) { if (r->set(pirq_router_dev, d, pirq, newirq)) {
DBG(" ... OK\n"); DBG(" ... OK\n");
...@@ -346,7 +355,7 @@ int pcibios_lookup_irq(struct pci_dev *dev, int assign) ...@@ -346,7 +355,7 @@ int pcibios_lookup_irq(struct pci_dev *dev, int assign)
if (!irq) { if (!irq) {
DBG(" ... failed\n"); DBG(" ... failed\n");
if (assign && newirq && mask == (1 << newirq)) { if (newirq && mask == (1 << newirq)) {
msg = "Guessed"; msg = "Guessed";
irq = newirq; irq = newirq;
} else } else
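The new selection loop above only accepts an IRQ if a trial request_irq() with the do-nothing shared handler (pcibios_test_irq_handler, added earlier in this file) succeeds, which is a cheap way of asking whether the line can currently be claimed. A hedged sketch of that probe, pulled out into a hypothetical helper; in the kernel it would sit next to the code above, which already has the needed declarations:

/* Never actually called; request_irq() just needs a handler to register. */
static void pirq_probe_handler(int irq, void *dev_id, struct pt_regs *regs)
{
}

/* Returns non-zero if `irq' can currently be claimed (free or shareable). */
static int pirq_line_available(unsigned int irq, void *dev_id)
{
	if (request_irq(irq, pirq_probe_handler, SA_SHIRQ, "pci-test", dev_id))
		return 0;	/* owned by a driver that refuses to share */
	free_irq(irq, dev_id);	/* release it again right away */
	return 1;
}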
...@@ -379,6 +388,15 @@ void __init pcibios_irq_init(void) ...@@ -379,6 +388,15 @@ void __init pcibios_irq_init(void)
if (pirq_table) { if (pirq_table) {
pirq_peer_trick(); pirq_peer_trick();
pirq_find_router(); pirq_find_router();
if (pirq_table->exclusive_irqs) {
int i;
for (i=0; i<16; i++)
if (!(pirq_table->exclusive_irqs & (1 << i)))
pirq_penalty[i] += 100;
}
/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
if (io_apic_assign_pci_irqs)
pirq_table = NULL;
} }
} }
...@@ -397,16 +415,19 @@ void __init pcibios_fixup_irqs(void) ...@@ -397,16 +415,19 @@ void __init pcibios_fixup_irqs(void)
DBG("%s: ignoring bogus IRQ %d\n", dev->slot_name, dev->irq); DBG("%s: ignoring bogus IRQ %d\n", dev->slot_name, dev->irq);
dev->irq = 0; dev->irq = 0;
} }
/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
pirq_penalty[dev->irq] = 0;
pirq_penalty[dev->irq]++; pirq_penalty[dev->irq]++;
} }
pci_for_each_dev(dev) { pci_for_each_dev(dev) {
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
#if defined(CONFIG_X86_IO_APIC) #ifdef CONFIG_X86_IO_APIC
/* /*
* Recalculate IRQ numbers if we use the I/O APIC. * Recalculate IRQ numbers if we use the I/O APIC.
*/ */
if (!skip_ioapic_setup) if (io_apic_assign_pci_irqs)
{ {
int irq; int irq;
...@@ -443,3 +464,31 @@ void __init pcibios_fixup_irqs(void) ...@@ -443,3 +464,31 @@ void __init pcibios_fixup_irqs(void)
pcibios_lookup_irq(dev, 0); pcibios_lookup_irq(dev, 0);
} }
} }
void __init pcibios_penalize_isa_irq(int irq)
{
/*
* If any ISAPnP device reports an IRQ in its list of possible
* IRQ's, we try to avoid assigning it to PCI devices.
*/
pirq_penalty[irq] += 100;
}
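A usage sketch for the hook above: whatever parses an ISAPnP card's IRQ resource options can report every IRQ the card might be configured to use, so the PCI router steers clear of them. The helper below is hypothetical (the real caller lives in the ISAPnP code), but the call it makes is exactly the pcibios_penalize_isa_irq() defined above:

/* irq_mask: bit n set means the card could be configured to use IRQ n. */
static void penalize_possible_isa_irqs(unsigned int irq_mask)
{
	int i;

	for (i = 0; i < 16; i++)
		if (irq_mask & (1 << i))
			pcibios_penalize_isa_irq(i);
}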
void pcibios_enable_irq(struct pci_dev *dev)
{
if (!dev->irq) {
u8 pin;
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin && !pcibios_lookup_irq(dev, 1)) {
char *msg;
if (io_apic_assign_pci_irqs)
msg = " Probably buggy MP table.";
else if (pci_probe & PCI_BIOS_IRQ_SCAN)
msg = "";
else
msg = " Please try using pci=biosirq.";
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
'A' + pin - 1, dev->slot_name, msg);
}
}
}
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/smp.h>
#include "pci-i386.h" #include "pci-i386.h"
...@@ -1046,13 +1045,6 @@ int pcibios_enable_device(struct pci_dev *dev) ...@@ -1046,13 +1045,6 @@ int pcibios_enable_device(struct pci_dev *dev)
if ((err = pcibios_enable_resources(dev)) < 0) if ((err = pcibios_enable_resources(dev)) < 0)
return err; return err;
if (!dev->irq) { pcibios_enable_irq(dev);
u8 pin;
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin && !pcibios_lookup_irq(dev, 1))
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
'A' + pin - 1, dev->slot_name,
(pci_probe & PCI_BIOS_IRQ_SCAN) ? "" : " Please try using pci=biosirq.");
}
return 0; return 0;
} }
...@@ -135,3 +135,7 @@ int pcibios_enable_device(struct pci_dev *dev) ...@@ -135,3 +135,7 @@ int pcibios_enable_device(struct pci_dev *dev)
{ {
return pcibios_enable_resources(dev); return pcibios_enable_resources(dev);
} }
void __init pcibios_penalize_isa_irq(int irq)
{
}
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
* Gareth Hughes <gareth@valinux.com>, May 2000 * Gareth Hughes <gareth@valinux.com>, May 2000
*/ */
#include <linux/config.h> /* for CONFIG_MATH_EMULATION */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
*/ */
#include <linux/config.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/smp.h> #include <linux/smp.h>
......
...@@ -12,15 +12,11 @@ NM := $(CROSS_COMPILE)nm -B ...@@ -12,15 +12,11 @@ NM := $(CROSS_COMPILE)nm -B
AWK := awk AWK := awk
LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds
# next line is for HP compiler backend: AFLAGS += -Wa,-x
#AFLAGS += -DGCC_RETVAL_POINTER_IN_R8 EXTRA =
# The next line is needed when compiling with the July snapshot of the Cygnus compiler:
#EXTRA = -D__GCC_DOESNT_KNOW_IN_REGS__ CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
# next two lines are for the September snapshot of the Cygnus compiler: -mconstant-gp -funwind-tables
AFLAGS += -D__GCC_MULTIREG_RETVALS__ -Wa,-x
EXTRA = -D__GCC_MULTIREG_RETVALS__
CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127
ifdef CONFIG_IA64_GENERIC ifdef CONFIG_IA64_GENERIC
CORE_FILES := arch/$(ARCH)/hp/hp.a \ CORE_FILES := arch/$(ARCH)/hp/hp.a \
......
...@@ -25,7 +25,8 @@ endif ...@@ -25,7 +25,8 @@ endif
all: $(TARGETS) all: $(TARGETS)
bootloader: $(OBJECTS) bootloader: $(OBJECTS)
$(LD) $(LINKFLAGS) $(OBJECTS) $(LIBS) -o bootloader $(LD) $(LINKFLAGS) $(OBJECTS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
-o bootloader
clean: clean:
rm -f $(TARGETS) rm -f $(TARGETS)
......
...@@ -7,7 +7,6 @@ mainmenu_option next_comment ...@@ -7,7 +7,6 @@ mainmenu_option next_comment
comment 'General setup' comment 'General setup'
define_bool CONFIG_IA64 y define_bool CONFIG_IA64 y
define_bool CONFIG_ITANIUM y # easy choice for now... ;-)
define_bool CONFIG_ISA n define_bool CONFIG_ISA n
define_bool CONFIG_SBUS n define_bool CONFIG_SBUS n
...@@ -25,10 +24,13 @@ choice 'Kernel page size' \ ...@@ -25,10 +24,13 @@ choice 'Kernel page size' \
64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB 64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB
if [ "$CONFIG_IA64_DIG" = "y" ]; then if [ "$CONFIG_IA64_DIG" = "y" ]; then
define_bool CONFIG_ITANIUM y
define_bool CONFIG_IA64_BRL_EMU y
bool ' Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC bool ' Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC
bool ' Enable Itanium A1-step specific code' CONFIG_ITANIUM_A1_SPECIFIC
bool ' Enable use of global TLB purge instruction (ptc.g)' CONFIG_ITANIUM_PTCG
bool ' Enable SoftSDV hacks' CONFIG_IA64_SOFTSDV_HACKS bool ' Enable SoftSDV hacks' CONFIG_IA64_SOFTSDV_HACKS
bool ' Enable BigSur hacks' CONFIG_IA64_BIGSUR_HACKS bool ' Enable AzusA hacks' CONFIG_IA64_AZUSA_HACKS
bool ' Enable Lion hacks' CONFIG_IA64_LION_HACKS
bool ' Emulate PAL/SAL/EFI firmware' CONFIG_IA64_FW_EMU bool ' Emulate PAL/SAL/EFI firmware' CONFIG_IA64_FW_EMU
bool ' Enable IA64 Machine Check Abort' CONFIG_IA64_MCA bool ' Enable IA64 Machine Check Abort' CONFIG_IA64_MCA
fi fi
...@@ -46,6 +48,7 @@ define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore. ...@@ -46,6 +48,7 @@ define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore.
bool 'SMP support' CONFIG_SMP bool 'SMP support' CONFIG_SMP
bool 'Performance monitor support' CONFIG_PERFMON bool 'Performance monitor support' CONFIG_PERFMON
bool '/proc/palinfo support' CONFIG_IA64_PALINFO
bool 'Networking support' CONFIG_NET bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC bool 'System V IPC' CONFIG_SYSVIPC
...@@ -190,5 +193,6 @@ bool 'Early printk support (requires VGA!)' CONFIG_IA64_EARLY_PRINTK ...@@ -190,5 +193,6 @@ bool 'Early printk support (requires VGA!)' CONFIG_IA64_EARLY_PRINTK
bool 'Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG bool 'Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG
bool 'Turn on irq debug checks (slow!)' CONFIG_IA64_DEBUG_IRQ bool 'Turn on irq debug checks (slow!)' CONFIG_IA64_DEBUG_IRQ
bool 'Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS bool 'Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS
bool 'Enable new unwind support' CONFIG_IA64_NEW_UNWIND
endmenu endmenu
...@@ -115,8 +115,8 @@ CONFIG_BLK_DEV_IDEDMA=y ...@@ -115,8 +115,8 @@ CONFIG_BLK_DEV_IDEDMA=y
CONFIG_IDEDMA_PCI_EXPERIMENTAL=y CONFIG_IDEDMA_PCI_EXPERIMENTAL=y
# CONFIG_IDEDMA_PCI_WIP is not set # CONFIG_IDEDMA_PCI_WIP is not set
# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set # CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
# CONFIG_BLK_DEV_AEC62XX is not set # CONFIG_BLK_DEV_AEC6210 is not set
# CONFIG_AEC62XX_TUNING is not set # CONFIG_AEC6210_TUNING is not set
# CONFIG_BLK_DEV_ALI15X3 is not set # CONFIG_BLK_DEV_ALI15X3 is not set
# CONFIG_WDC_ALI15X3 is not set # CONFIG_WDC_ALI15X3 is not set
# CONFIG_BLK_DEV_AMD7409 is not set # CONFIG_BLK_DEV_AMD7409 is not set
......
...@@ -67,6 +67,12 @@ set_rte (unsigned long iosapic_addr, int entry, int pol, int trigger, int delive ...@@ -67,6 +67,12 @@ set_rte (unsigned long iosapic_addr, int entry, int pol, int trigger, int delive
(delivery << IO_SAPIC_DELIVERY_SHIFT) | (delivery << IO_SAPIC_DELIVERY_SHIFT) |
vector); vector);
#ifdef CONFIG_IA64_AZUSA_HACKS
/* set Flush Disable bit */
if (iosapic_addr != 0xc0000000fec00000)
low32 |= (1 << 17);
#endif
/* dest contains both id and eid */ /* dest contains both id and eid */
high32 = (dest << IO_SAPIC_DEST_SHIFT); high32 = (dest << IO_SAPIC_DEST_SHIFT);
...@@ -216,30 +222,33 @@ iosapic_version (unsigned long base_addr) ...@@ -216,30 +222,33 @@ iosapic_version (unsigned long base_addr)
} }
void void
iosapic_init (unsigned long address) iosapic_init (unsigned long address, int irqbase)
{ {
struct hw_interrupt_type *irq_type; struct hw_interrupt_type *irq_type;
struct pci_vector_struct *vectors; struct pci_vector_struct *vectors;
int i, irq; int i, irq;
/* if (irqbase == 0)
* Map the legacy ISA devices into the IOSAPIC data. Some of /*
* these may get reprogrammed later on with data from the ACPI * Map the legacy ISA devices into the IOSAPIC data.
* Interrupt Source Override table. * Some of these may get reprogrammed later on with
*/ * data from the ACPI Interrupt Source Override table.
for (i = 0; i < 16; i++) { */
irq = isa_irq_to_vector(i); for (i = 0; i < 16; i++) {
iosapic_pin(irq) = i; irq = isa_irq_to_vector(i);
iosapic_bus(irq) = BUS_ISA; iosapic_pin(irq) = i;
iosapic_busdata(irq) = 0; iosapic_bus(irq) = BUS_ISA;
iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY; iosapic_busdata(irq) = 0;
iosapic_trigger(irq) = IO_SAPIC_EDGE; iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY;
iosapic_polarity(irq) = IO_SAPIC_POL_HIGH; iosapic_trigger(irq) = IO_SAPIC_EDGE;
iosapic_polarity(irq) = IO_SAPIC_POL_HIGH;
#ifdef DEBUG_IRQ_ROUTING #ifdef DEBUG_IRQ_ROUTING
printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n", i, irq, iosapic_pin(irq)); printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n",
i, irq, iosapic_pin(irq));
#endif #endif
} }
#ifndef CONFIG_IA64_SOFTSDV_HACKS
/* /*
* Map the PCI Interrupt data into the ACPI IOSAPIC data using * Map the PCI Interrupt data into the ACPI IOSAPIC data using
* the info that the bootstrap loader passed to us. * the info that the bootstrap loader passed to us.
...@@ -250,6 +259,8 @@ iosapic_init (unsigned long address) ...@@ -250,6 +259,8 @@ iosapic_init (unsigned long address)
irq = vectors[i].irq; irq = vectors[i].irq;
if (irq < 16) if (irq < 16)
irq = isa_irq_to_vector(irq); irq = isa_irq_to_vector(irq);
if (iosapic_baseirq(irq) != irqbase)
continue;
iosapic_bustype(irq) = BUS_PCI; iosapic_bustype(irq) = BUS_PCI;
iosapic_pin(irq) = irq - iosapic_baseirq(irq); iosapic_pin(irq) = irq - iosapic_baseirq(irq);
...@@ -270,8 +281,12 @@ iosapic_init (unsigned long address) ...@@ -270,8 +281,12 @@ iosapic_init (unsigned long address)
irq, iosapic_pin(irq)); irq, iosapic_pin(irq));
#endif #endif
} }
#endif /* CONFIG_IA64_SOFTSDV_HACKS */
for (i = 0; i < NR_IRQS; ++i) { for (i = 0; i < NR_IRQS; ++i) {
if (iosapic_baseirq(i) != irqbase)
continue;
if (iosapic_pin(i) != -1) { if (iosapic_pin(i) != -1) {
if (iosapic_trigger(i) == IO_SAPIC_LEVEL) if (iosapic_trigger(i) == IO_SAPIC_LEVEL)
irq_type = &irq_type_iosapic_level; irq_type = &irq_type_iosapic_level;
......
...@@ -53,6 +53,10 @@ dig_setup (char **cmdline_p) ...@@ -53,6 +53,10 @@ dig_setup (char **cmdline_p)
*/ */
ROOT_DEV = to_kdev_t(0x0802); /* default to second partition on first drive */ ROOT_DEV = to_kdev_t(0x0802); /* default to second partition on first drive */
#ifdef CONFIG_IA64_SOFTSDV_HACKS
ROOT_DEV = to_kdev_t(0x0302); /* 2nd partition on 1st IDE */
#endif /* CONFIG_IA64_SOFTSDV_HACKS */
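The kdev_t values above pack the major number in the high byte and the minor in the low byte: 0x0802 is major 8, minor 2 (second partition on the first SCSI disk), while the SoftSDV override 0x0302 is major 3, minor 2 (second partition on the first IDE disk). A tiny standalone demo of the decoding:

#include <stdio.h>

int main(void)
{
	unsigned int devs[] = { 0x0802, 0x0302 };
	int i;

	for (i = 0; i < 2; i++)
		printf("0x%04x -> major %u, minor %u\n",
		       devs[i], devs[i] >> 8, devs[i] & 0xff);
	return 0;
}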
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
init_smp_config(); init_smp_config();
#endif #endif
......
...@@ -6,6 +6,8 @@ ...@@ -6,6 +6,8 @@
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/irq.h> #include <linux/irq.h>
static unsigned int static unsigned int
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
* IA-32 ELF support. * IA-32 ELF support.
* *
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
*
* 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/posix_types.h> #include <linux/posix_types.h>
...@@ -84,6 +86,15 @@ void ia64_elf32_init(struct pt_regs *regs) ...@@ -84,6 +86,15 @@ void ia64_elf32_init(struct pt_regs *regs)
current->thread.map_base = 0x40000000; current->thread.map_base = 0x40000000;
/* setup ia32 state for ia32_load_state */
current->thread.eflag = IA32_EFLAG;
current->thread.csd = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L, 3L, 1L, 1L, 1L);
current->thread.ssd = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
current->thread.tssd = IA64_SEG_DESCRIPTOR(IA32_PAGE_OFFSET + PAGE_SIZE, 0x1FFFL, 0xBL,
1L, 3L, 1L, 1L, 1L);
/* CS descriptor */ /* CS descriptor */
__asm__("mov ar.csd = %0" : /* no outputs */ __asm__("mov ar.csd = %0" : /* no outputs */
: "r" IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L, : "r" IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L,
...@@ -96,9 +107,6 @@ void ia64_elf32_init(struct pt_regs *regs) ...@@ -96,9 +107,6 @@ void ia64_elf32_init(struct pt_regs *regs)
__asm__("mov ar.eflag = %0" : /* no outputs */ : "r" (IA32_EFLAG)); __asm__("mov ar.eflag = %0" : /* no outputs */ : "r" (IA32_EFLAG));
/* Control registers */ /* Control registers */
__asm__("mov ar.cflg = %0"
: /* no outputs */
: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
__asm__("mov ar.fsr = %0" __asm__("mov ar.fsr = %0"
: /* no outputs */ : /* no outputs */
: "r" ((ulong)IA32_FSR_DEFAULT)); : "r" ((ulong)IA32_FSR_DEFAULT));
......
#include <asm/asmmacro.h>
#include <asm/offsets.h> #include <asm/offsets.h>
#include <asm/signal.h> #include <asm/signal.h>
#include "../kernel/entry.h"
// //
// Get possibly unaligned sigmask argument into an aligned // Get possibly unaligned sigmask argument into an aligned
// kernel buffer // kernel buffer
.text .text
.proc ia32_rt_sigsuspend
.global ia32_rt_sigsuspend
ia32_rt_sigsuspend:
GLOBAL_ENTRY(ia32_rt_sigsuspend)
// We'll cheat and not do an alloc here since we are ultimately // We'll cheat and not do an alloc here since we are ultimately
// going to do a simple branch to the IA64 sys_rt_sigsuspend. // going to do a simple branch to the IA64 sys_rt_sigsuspend.
// r32 is still the first argument which is the signal mask. // r32 is still the first argument which is the signal mask.
...@@ -32,24 +33,22 @@ ia32_rt_sigsuspend: ...@@ -32,24 +33,22 @@ ia32_rt_sigsuspend:
st4 [r32]=r2 st4 [r32]=r2
st4 [r10]=r3 st4 [r10]=r3
br.cond.sptk.many sys_rt_sigsuspend br.cond.sptk.many sys_rt_sigsuspend
END(ia32_rt_sigsuspend)
.section __ex_table,"a" .section __ex_table,"a"
data4 @gprel(1b) data4 @gprel(1b)
data4 (2b-1b)|1 data4 (2b-1b)|1
.previous .previous
GLOBAL_ENTRY(ia32_ret_from_syscall)
PT_REGS_UNWIND_INFO(0)
.endp ia32_rt_sigsuspend
.global ia32_ret_from_syscall
.proc ia32_ret_from_syscall
ia32_ret_from_syscall:
cmp.ge p6,p7=r8,r0 // syscall executed successfully? cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;; ;;
st8 [r2]=r8 // store return value in slot for r8 st8 [r2]=r8 // store return value in slot for r8
br.cond.sptk.few ia64_leave_kernel br.cond.sptk.few ia64_leave_kernel
.endp ia32_ret_from_syscall END(ia32_ret_from_syscall)
// //
// Invoke a system call, but do some tracing before and after the call. // Invoke a system call, but do some tracing before and after the call.
...@@ -61,9 +60,8 @@ ia32_ret_from_syscall: ...@@ -61,9 +60,8 @@ ia32_ret_from_syscall:
// r15 = syscall number // r15 = syscall number
// b6 = syscall entry point // b6 = syscall entry point
// //
.global ia32_trace_syscall GLOBAL_ENTRY(ia32_trace_syscall)
.proc ia32_trace_syscall PT_REGS_UNWIND_INFO(0)
ia32_trace_syscall:
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args
.Lret4: br.call.sptk.few rp=b6 // do the syscall .Lret4: br.call.sptk.few rp=b6 // do the syscall
.Lret5: cmp.lt p6,p0=r8,r0 // syscall failed? .Lret5: cmp.lt p6,p0=r8,r0 // syscall failed?
...@@ -72,42 +70,38 @@ ia32_trace_syscall: ...@@ -72,42 +70,38 @@ ia32_trace_syscall:
st8.spill [r2]=r8 // store return value in slot for r8 st8.spill [r2]=r8 // store return value in slot for r8
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value
.Lret6: br.cond.sptk.many ia64_leave_kernel // rp MUST be != ia64_leave_kernel! .Lret6: br.cond.sptk.many ia64_leave_kernel // rp MUST be != ia64_leave_kernel!
END(ia32_trace_syscall)
.endp ia32_trace_syscall GLOBAL_ENTRY(sys32_vfork)
.align 16
.global sys32_vfork
.proc sys32_vfork
sys32_vfork:
alloc r16=ar.pfs,2,2,3,0;; alloc r16=ar.pfs,2,2,3,0;;
mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags
br.cond.sptk.few .fork1 // do the work br.cond.sptk.few .fork1 // do the work
.endp sys32_vfork END(sys32_vfork)
.align 16 GLOBAL_ENTRY(sys32_fork)
.global sys32_fork UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
.proc sys32_fork alloc r16=ar.pfs,2,2,3,0
sys32_fork:
alloc r16=ar.pfs,2,2,3,0;;
mov out0=SIGCHLD // out0 = clone_flags mov out0=SIGCHLD // out0 = clone_flags
;;
.fork1: .fork1:
movl r28=1f mov loc0=rp
mov loc1=rp mov loc1=r16 // save ar.pfs across do_fork
br.cond.sptk.many save_switch_stack DO_SAVE_SWITCH_STACK
1:
mov loc0=r16 // save ar.pfs across do_fork UNW(.body)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp adds out2=IA64_SWITCH_STACK_SIZE+16,sp
adds r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp adds r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp
;; ;;
ld8 out1=[r2] // fetch usp from pt_regs.r12 ld8 out1=[r2] // fetch usp from pt_regs.r12
br.call.sptk.few rp=do_fork br.call.sptk.few rp=do_fork
.ret1: .ret1:
mov ar.pfs=loc0 mov ar.pfs=loc1
UNW(.restore sp)
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov rp=loc1 mov rp=loc0
;;
br.ret.sptk.many rp br.ret.sptk.many rp
.endp sys32_fork END(sys32_fork)
.rodata .rodata
.align 8 .align 8
...@@ -304,3 +298,8 @@ ia32_syscall_table: ...@@ -304,3 +298,8 @@ ia32_syscall_table:
data8 sys_ni_syscall /* streams1 */ data8 sys_ni_syscall /* streams1 */
data8 sys_ni_syscall /* streams2 */ data8 sys_ni_syscall /* streams2 */
data8 sys32_vfork /* 190 */ data8 sys32_vfork /* 190 */
/*
* CAUTION: If any system calls are added beyond this point
* then the check in `arch/ia64/kernel/ivt.S' will have
* to be modified also. You've been warned.
*/
...@@ -55,7 +55,7 @@ struct rt_sigframe_ia32 ...@@ -55,7 +55,7 @@ struct rt_sigframe_ia32
}; };
static int static int
copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from) copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
{ {
int err; int err;
...@@ -104,6 +104,7 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate, ...@@ -104,6 +104,7 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate,
struct pt_regs *regs, unsigned long mask) struct pt_regs *regs, unsigned long mask)
{ {
int err = 0; int err = 0;
unsigned long flag;
err |= __put_user((regs->r16 >> 32) & 0xffff , (unsigned int *)&sc->fs); err |= __put_user((regs->r16 >> 32) & 0xffff , (unsigned int *)&sc->fs);
err |= __put_user((regs->r16 >> 48) & 0xffff , (unsigned int *)&sc->gs); err |= __put_user((regs->r16 >> 48) & 0xffff , (unsigned int *)&sc->gs);
...@@ -124,9 +125,11 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate, ...@@ -124,9 +125,11 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate,
#endif #endif
err |= __put_user(regs->cr_iip, &sc->eip); err |= __put_user(regs->cr_iip, &sc->eip);
err |= __put_user(regs->r17 & 0xffff, (unsigned int *)&sc->cs); err |= __put_user(regs->r17 & 0xffff, (unsigned int *)&sc->cs);
#if 0 /*
err |= __put_user(regs->eflags, &sc->eflags); * `eflags' is in an ar register for this context
#endif */
asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
err |= __put_user((unsigned int)flag, &sc->eflags);
err |= __put_user(regs->r12, &sc->esp_at_signal); err |= __put_user(regs->r12, &sc->esp_at_signal);
err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss); err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);
...@@ -190,15 +193,26 @@ restore_sigcontext_ia32(struct pt_regs *regs, struct sigcontext_ia32 *sc, int *p ...@@ -190,15 +193,26 @@ restore_sigcontext_ia32(struct pt_regs *regs, struct sigcontext_ia32 *sc, int *p
COPY(cr_iip, eip); COPY(cr_iip, eip);
COPY_SEG_STRICT(cs); COPY_SEG_STRICT(cs);
COPY_SEG_STRICT(ss); COPY_SEG_STRICT(ss);
#if 0
{ {
unsigned int tmpflags; unsigned int tmpflags;
err |= __get_user(tmpflags, &sc->eflags); unsigned long flag;
/* XXX: Change this to ar.eflags */
regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); /*
regs->orig_eax = -1; /* disable syscall checks */ * IA32 `eflags' is not part of `pt_regs', it's
* in an ar register which is part of the thread
* context. Fortunately, we are executing in the
* IA32 process's context.
*/
err |= __get_user(tmpflags, &sc->eflags);
asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
flag &= ~0x40DD5;
flag |= (tmpflags & 0x40DD5);
asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
} }
#if 0
{ {
struct _fpstate * buf; struct _fpstate * buf;
err |= __get_user(buf, &sc->fpstate); err |= __get_user(buf, &sc->fpstate);
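The restore path a few lines up only lets the signal frame modify a whitelisted subset of EFLAGS (the 0x40DD5 mask: roughly the arithmetic flags plus TF, DF and AC); everything else is kept from the current ar.eflag. The read-modify-write itself is plain bit arithmetic, shown here as a standalone demo with made-up values:

#include <stdio.h>

#define USER_EFLAGS_MASK 0x40DD5UL	/* same mask the code above uses */

int main(void)
{
	unsigned long ar_eflag  = 0x00003202UL;	/* pretend current ar.eflag */
	unsigned long from_user = 0xFFFFFFFFUL;	/* whatever the frame contains */

	ar_eflag = (ar_eflag & ~USER_EFLAGS_MASK) | (from_user & USER_EFLAGS_MASK);
	printf("new ar.eflag = 0x%lx\n", ar_eflag);	/* prints 0x43fd7 */
	return 0;
}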
...@@ -271,7 +285,7 @@ setup_frame_ia32(int sig, struct k_sigaction *ka, sigset_t *set, ...@@ -271,7 +285,7 @@ setup_frame_ia32(int sig, struct k_sigaction *ka, sigset_t *set,
/* Set up to return from userspace. If provided, use a stub /* Set up to return from userspace. If provided, use a stub
already in userspace. */ already in userspace. */
err |= __put_user(frame->retcode, &frame->pretcode); err |= __put_user((long)frame->retcode, &frame->pretcode);
/* This is popl %eax ; movl $,%eax ; int $0x80 */ /* This is popl %eax ; movl $,%eax ; int $0x80 */
err |= __put_user(0xb858, (short *)(frame->retcode+0)); err |= __put_user(0xb858, (short *)(frame->retcode+0));
#define __IA32_NR_sigreturn 119 #define __IA32_NR_sigreturn 119
...@@ -326,8 +340,8 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -326,8 +340,8 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info,
? current->exec_domain->signal_invmap[sig] ? current->exec_domain->signal_invmap[sig]
: sig), : sig),
&frame->sig); &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user((long)&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc); err |= __put_user((long)&frame->uc, &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info); err |= copy_siginfo_to_user32(&frame->info, info);
/* Create the ucontext. */ /* Create the ucontext. */
...@@ -341,7 +355,7 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -341,7 +355,7 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs, set->sig[0]); regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
err |= __put_user(frame->retcode, &frame->pretcode); err |= __put_user((long)frame->retcode, &frame->pretcode);
/* This is movl $,%eax ; int $0x80 */ /* This is movl $,%eax ; int $0x80 */
err |= __put_user(0xb8, (char *)(frame->retcode+0)); err |= __put_user(0xb8, (char *)(frame->retcode+0));
#define __IA32_NR_rt_sigreturn 173 #define __IA32_NR_rt_sigreturn 173
......
/* /*
* IA32 helper functions * IA32 helper functions
*
* 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -16,6 +19,57 @@ extern unsigned long *ia32_gdt_table, *ia32_tss; ...@@ -16,6 +19,57 @@ extern unsigned long *ia32_gdt_table, *ia32_tss;
extern void die_if_kernel (char *str, struct pt_regs *regs, long err); extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
void
ia32_save_state (struct thread_struct *thread)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
asm ("mov %0=ar.eflag;"
"mov %1=ar.fsr;"
"mov %2=ar.fcr;"
"mov %3=ar.fir;"
"mov %4=ar.fdr;"
"mov %5=ar.csd;"
"mov %6=ar.ssd;"
"mov %7=ar.k1"
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr),
"=r"(csd), "=r"(ssd), "=r"(tssd));
thread->eflag = eflag;
thread->fsr = fsr;
thread->fcr = fcr;
thread->fir = fir;
thread->fdr = fdr;
thread->csd = csd;
thread->ssd = ssd;
thread->tssd = tssd;
}
void
ia32_load_state (struct thread_struct *thread)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
eflag = thread->eflag;
fsr = thread->fsr;
fcr = thread->fcr;
fir = thread->fir;
fdr = thread->fdr;
csd = thread->csd;
ssd = thread->ssd;
tssd = thread->tssd;
asm volatile ("mov ar.eflag=%0;"
"mov ar.fsr=%1;"
"mov ar.fcr=%2;"
"mov ar.fir=%3;"
"mov ar.fdr=%4;"
"mov ar.csd=%5;"
"mov ar.ssd=%6;"
"mov ar.k1=%7"
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr),
"r"(csd), "r"(ssd), "r"(tssd));
}
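ia32_save_state()/ia32_load_state() exist so the IA-32 application registers (ar.eflag, ar.fsr/fcr/fir/fdr, ar.csd/ssd and the TSS descriptor kept in ar.k1) survive a context switch. A hedged sketch of how a switch path could use them; task_is_ia32() is a hypothetical stand-in for whatever test the real switch code uses, it is not taken from this patch:

static void switch_ia32_context(struct task_struct *prev, struct task_struct *next)
{
	if (task_is_ia32(prev))
		ia32_save_state(&prev->thread);	/* ar.* registers -> prev->thread */
	if (task_is_ia32(next))
		ia32_load_state(&next->thread);	/* next->thread -> ar.* registers */
}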
/* /*
* Setup IA32 GDT and TSS * Setup IA32 GDT and TSS
*/ */
......
/*
* IA32 exceptions handler
*
* 06/16/00 A. Mallick added siginfo for most cases (close to IA32)
*/
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -9,9 +15,11 @@ ia32_exception (struct pt_regs *regs, unsigned long isr) ...@@ -9,9 +15,11 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
{ {
struct siginfo siginfo; struct siginfo siginfo;
siginfo.si_errno = 0;
switch ((isr >> 16) & 0xff) { switch ((isr >> 16) & 0xff) {
case 1: case 1:
case 2: case 2:
siginfo.si_signo = SIGTRAP;
if (isr == 0) if (isr == 0)
siginfo.si_code = TRAP_TRACE; siginfo.si_code = TRAP_TRACE;
else if (isr & 0x4) else if (isr & 0x4)
...@@ -21,27 +29,96 @@ ia32_exception (struct pt_regs *regs, unsigned long isr) ...@@ -21,27 +29,96 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
break; break;
case 3: case 3:
siginfo.si_signo = SIGTRAP;
siginfo.si_code = TRAP_BRKPT; siginfo.si_code = TRAP_BRKPT;
break; break;
case 0: /* Divide fault */ case 0: /* Divide fault */
siginfo.si_signo = SIGFPE;
siginfo.si_code = FPE_INTDIV;
break;
case 4: /* Overflow */ case 4: /* Overflow */
case 5: /* Bounds fault */ case 5: /* Bounds fault */
siginfo.si_signo = SIGFPE;
siginfo.si_code = 0;
break;
case 6: /* Invalid Op-code */ case 6: /* Invalid Op-code */
siginfo.si_signo = SIGILL;
siginfo.si_code = ILL_ILLOPN;
break;
case 7: /* FP DNA */ case 7: /* FP DNA */
case 8: /* Double Fault */ case 8: /* Double Fault */
case 9: /* Invalid TSS */ case 9: /* Invalid TSS */
case 11: /* Segment not present */ case 11: /* Segment not present */
case 12: /* Stack fault */ case 12: /* Stack fault */
case 13: /* General Protection Fault */ case 13: /* General Protection Fault */
siginfo.si_signo = SIGSEGV;
siginfo.si_code = 0;
break;
case 16: /* Pending FP error */ case 16: /* Pending FP error */
{
unsigned long fsr, fcr;
asm ("mov %0=ar.fsr;"
"mov %1=ar.fcr;"
: "=r"(fsr), "=r"(fcr));
siginfo.si_signo = SIGFPE;
/*
* ((~fcr) & fsr) keeps only the exceptions that are unmasked in the
* control word (ar.fcr holds the IA-32 FP control word here, ar.fsr the
* status word).  0x3f is the exception bits in these regs, 0x200 is the
* C1 flag you need in case of a stack fault, 0x040 is the stack
* fault bit.  We should only be taking one exception at a time,
* so if this combination doesn't produce any single exception,
* then we have a bad program that isn't synchronizing its FPU usage
* and it will suffer the consequences, since we won't be able to
* fully reproduce the context of the exception.
*/
switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
case 0x000:
default:
siginfo.si_code = 0;
break;
case 0x001: /* Invalid Op */
case 0x040: /* Stack Fault */
case 0x240: /* Stack Fault | Direction */
siginfo.si_code = FPE_FLTINV;
break;
case 0x002: /* Denormalize */
case 0x010: /* Underflow */
siginfo.si_code = FPE_FLTUND;
break;
case 0x004: /* Zero Divide */
siginfo.si_code = FPE_FLTDIV;
break;
case 0x008: /* Overflow */
siginfo.si_code = FPE_FLTOVF;
break;
case 0x020: /* Precision */
siginfo.si_code = FPE_FLTRES;
break;
}
break;
}
case 17: /* Alignment check */ case 17: /* Alignment check */
siginfo.si_signo = SIGSEGV;
siginfo.si_code = BUS_ADRALN;
break;
case 19: /* SSE Numeric error */ case 19: /* SSE Numeric error */
siginfo.si_signo = SIGFPE;
siginfo.si_code = 0;
break;
default: default:
return -1; return -1;
} }
siginfo.si_signo = SIGTRAP; force_sig_info(SIGTRAP, &siginfo, current);
siginfo.si_errno = 0;
send_sig_info(SIGTRAP, &siginfo, current);
return 0; return 0;
} }
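The pending-FP-error case computes a single key, ((~fcr) & (fsr & 0x3f)) | (fsr & 0x240), and switches on it to choose an si_code. A worked standalone example of that decode (the register values are made up for the demo):

#include <stdio.h>

static const char *fp_si_code(unsigned long fcr, unsigned long fsr)
{
	switch (((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
	case 0x001: case 0x040: case 0x240: return "FPE_FLTINV";
	case 0x002: case 0x010:             return "FPE_FLTUND";
	case 0x004:                         return "FPE_FLTDIV";
	case 0x008:                         return "FPE_FLTOVF";
	case 0x020:                         return "FPE_FLTRES";
	default:                            return "0 (unknown combination)";
	}
}

int main(void)
{
	/* Zero-divide set in the status word, unmasked in the control word. */
	printf("%s\n", fp_si_code(0x0UL /* fcr */, 0x4UL /* fsr */));	/* FPE_FLTDIV */
	return 0;
}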
# #
# Makefile for the linux kernel. # Makefile for the linux kernel.
# #
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are now in the main makefile...
.S.s: .S.s:
$(CPP) $(AFLAGS) -o $*.s $< $(CPP) $(AFLAGS) -o $*.s $<
...@@ -15,16 +10,19 @@ ...@@ -15,16 +10,19 @@
all: kernel.o head.o init_task.o all: kernel.o head.o init_task.o
O_TARGET := kernel.o O_TARGET := kernel.o
O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \ O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \
pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o \ pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o \
signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
#O_OBJS := fpreg.o OX_OBJS := ia64_ksyms.o
#OX_OBJS := ia64_ksyms.o
ifdef CONFIG_IA64_GENERIC ifdef CONFIG_IA64_GENERIC
O_OBJS += machvec.o O_OBJS += machvec.o
endif endif
ifdef CONFIG_IA64_PALINFO
O_OBJS += palinfo.o
endif
ifdef CONFIG_PCI ifdef CONFIG_PCI
O_OBJS += pci.o O_OBJS += pci.o
endif endif
...@@ -37,6 +35,10 @@ ifdef CONFIG_IA64_MCA ...@@ -37,6 +35,10 @@ ifdef CONFIG_IA64_MCA
O_OBJS += mca.o mca_asm.o O_OBJS += mca.o mca_asm.o
endif endif
ifdef CONFIG_IA64_BRL_EMU
O_OBJS += brl_emu.o
endif
clean:: clean::
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
...@@ -89,16 +89,16 @@ acpi_iosapic(char *p) ...@@ -89,16 +89,16 @@ acpi_iosapic(char *p)
#ifdef CONFIG_IA64_DIG #ifdef CONFIG_IA64_DIG
acpi_entry_iosapic_t *iosapic = (acpi_entry_iosapic_t *) p; acpi_entry_iosapic_t *iosapic = (acpi_entry_iosapic_t *) p;
unsigned int ver, v; unsigned int ver, v;
int l, pins; int l, max_pin;
ver = iosapic_version(iosapic->address); ver = iosapic_version(iosapic->address);
pins = (ver >> 16) & 0xff; max_pin = (ver >> 16) & 0xff;
printk("IOSAPIC Version %x.%x: address 0x%lx IRQs 0x%x - 0x%x\n", printk("IOSAPIC Version %x.%x: address 0x%lx IRQs 0x%x - 0x%x\n",
(ver & 0xf0) >> 4, (ver & 0x0f), iosapic->address, (ver & 0xf0) >> 4, (ver & 0x0f), iosapic->address,
iosapic->irq_base, iosapic->irq_base + pins); iosapic->irq_base, iosapic->irq_base + max_pin);
for (l = 0; l < pins; l++) { for (l = 0; l <= max_pin; l++) {
v = iosapic->irq_base + l; v = iosapic->irq_base + l;
if (v < 16) if (v < 16)
v = isa_irq_to_vector(v); v = isa_irq_to_vector(v);
...@@ -110,7 +110,7 @@ acpi_iosapic(char *p) ...@@ -110,7 +110,7 @@ acpi_iosapic(char *p)
iosapic_addr(v) = (unsigned long) ioremap(iosapic->address, 0); iosapic_addr(v) = (unsigned long) ioremap(iosapic->address, 0);
iosapic_baseirq(v) = iosapic->irq_base; iosapic_baseirq(v) = iosapic->irq_base;
} }
iosapic_init(iosapic->address); iosapic_init(iosapic->address, iosapic->irq_base);
#endif #endif
} }
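The rename from `pins' to `max_pin' is the point of this hunk: bits 16-23 of the IOSAPIC version register hold the index of the highest redirection entry, not a count, so the loop has to run from 0 through max_pin inclusive. A small demo of the decode (the version value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int ver = 0x00170020;	/* example version register value */
	unsigned int max_pin = (ver >> 16) & 0xff;

	printf("IOSAPIC version %x.%x, redirection entries 0..%u (%u pins)\n",
	       (ver & 0xf0) >> 4, ver & 0x0f, max_pin, max_pin + 1);
	return 0;
}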
......
/*
* Emulation of the "brl" instruction for IA64 processors that
* don't support it in hardware.
* Author: Stephan Zeisset, Intel Corp. <Stephan.Zeisset@intel.com>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
extern char ia64_set_b1, ia64_set_b2, ia64_set_b3, ia64_set_b4, ia64_set_b5;
struct illegal_op_return {
unsigned long fkt, arg1, arg2, arg3;
};
/*
* The unimplemented bits of a virtual address must be set
* to the value of the most significant implemented bit.
* unimpl_va_mask includes all unimplemented bits and
* the most significant implemented bit, so the result
* of an and operation with the mask must be all 0's
* or all 1's for the address to be valid.
*/
#define unimplemented_virtual_address(va) ( \
((va) & my_cpu_data.unimpl_va_mask) != 0 && \
((va) & my_cpu_data.unimpl_va_mask) != my_cpu_data.unimpl_va_mask \
)
/*
* The unimplemented bits of a physical address must be 0.
* unimpl_pa_mask includes all unimplemented bits, so the result
* of an and operation with the mask must be all 0's for the
* address to be valid.
*/
#define unimplemented_physical_address(pa) ( \
((pa) & my_cpu_data.unimpl_pa_mask) != 0 \
)
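As a concrete illustration of the virtual-address check above: suppose a CPU implements virtual-address bits 0-49 within each region (plus the region bits 61-63), so unimpl_va_mask covers bits 49-60, i.e. all the unimplemented bits plus the most significant implemented one. An address is acceptable only if those bits are all zero or all one. A standalone demo with that assumed mask (the real mask comes from my_cpu_data at runtime):

#include <stdio.h>

#define DEMO_UNIMPL_VA_MASK 0x1ffe000000000000UL	/* bits 49..60, assumed */

static int unimplemented_va(unsigned long va)
{
	unsigned long bits = va & DEMO_UNIMPL_VA_MASK;

	return bits != 0 && bits != DEMO_UNIMPL_VA_MASK;
}

int main(void)
{
	printf("%d\n", unimplemented_va(0x2000000000001000UL));	/* 0: bits 49..60 all clear */
	printf("%d\n", unimplemented_va(0x2400000000001000UL));	/* 1: bit 58 set, not uniform */
	return 0;
}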
/*
* Handle an illegal operation fault that was caused by an
* unimplemented "brl" instruction.
* If we are not successful (e.g because the illegal operation
* wasn't caused by a "brl" after all), we return -1.
* If we are successful, we return either 0 or the address
* of a "fixup" function for manipulating preserved register
* state.
*/
struct illegal_op_return
ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
{
unsigned long bundle[2];
unsigned long opcode, btype, qp, offset;
unsigned long next_ip;
struct siginfo siginfo;
struct illegal_op_return rv;
int tmp_taken, unimplemented_address;
rv.fkt = (unsigned long) -1;
/*
* Decode the instruction bundle.
*/
if (copy_from_user(bundle, (void *) (regs->cr_iip), sizeof(bundle)))
return rv;
next_ip = (unsigned long) regs->cr_iip + 16;
/* "brl" must be in slot 2. */
if (ia64_psr(regs)->ri != 1) return rv;
/* Must be "mlx" template */
if ((bundle[0] & 0x1e) != 0x4) return rv;
opcode = (bundle[1] >> 60);
btype = ((bundle[1] >> 29) & 0x7);
qp = ((bundle[1] >> 23) & 0x3f);
offset = ((bundle[1] & 0x0800000000000000L) << 4)
| ((bundle[1] & 0x00fffff000000000L) >> 32)
| ((bundle[1] & 0x00000000007fffffL) << 40)
| ((bundle[0] & 0xffff000000000000L) >> 24);
tmp_taken = regs->pr & (1L << qp);
switch(opcode) {
case 0xC:
/*
* Long Branch.
*/
if (btype != 0) return rv;
rv.fkt = 0;
if (!(tmp_taken)) {
/*
* Qualifying predicate is 0.
* Skip instruction.
*/
regs->cr_iip = next_ip;
ia64_psr(regs)->ri = 0;
return rv;
}
break;
case 0xD:
/*
* Long Call.
*/
rv.fkt = 0;
if (!(tmp_taken)) {
/*
* Qualifying predicate is 0.
* Skip instruction.
*/
regs->cr_iip = next_ip;
ia64_psr(regs)->ri = 0;
return rv;
}
/*
* BR[btype] = IP+16
*/
switch(btype) {
case 0:
regs->b0 = next_ip;
break;
case 1:
rv.fkt = (unsigned long) &ia64_set_b1;
break;
case 2:
rv.fkt = (unsigned long) &ia64_set_b2;
break;
case 3:
rv.fkt = (unsigned long) &ia64_set_b3;
break;
case 4:
rv.fkt = (unsigned long) &ia64_set_b4;
break;
case 5:
rv.fkt = (unsigned long) &ia64_set_b5;
break;
case 6:
regs->b6 = next_ip;
break;
case 7:
regs->b7 = next_ip;
break;
}
rv.arg1 = next_ip;
/*
* AR[PFS].pfm = CFM
* AR[PFS].pec = AR[EC]
* AR[PFS].ppl = PSR.cpl
*/
regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
| (ar_ec << 52)
| ((unsigned long) ia64_psr(regs)->cpl << 62));
/*
* CFM.sof -= CFM.sol
* CFM.sol = 0
* CFM.sor = 0
* CFM.rrb.gr = 0
* CFM.rrb.fr = 0
* CFM.rrb.pr = 0
*/
regs->cr_ifs = ((regs->cr_ifs & 0xffffffc00000007f)
- ((regs->cr_ifs >> 7) & 0x7f));
break;
default:
/*
* Unknown opcode.
*/
return rv;
}
regs->cr_iip += offset;
ia64_psr(regs)->ri = 0;
if (ia64_psr(regs)->it == 0)
unimplemented_address = unimplemented_physical_address(regs->cr_iip);
else
unimplemented_address = unimplemented_virtual_address(regs->cr_iip);
if (unimplemented_address) {
/*
* The target address contains unimplemented bits.
*/
printk("Woah! Unimplemented Instruction Address Trap!\n");
siginfo.si_signo = SIGILL;
siginfo.si_errno = 0;
siginfo.si_code = ILL_BADIADDR;
force_sig_info(SIGILL, &siginfo, current);
} else if (ia64_psr(regs)->tb) {
/*
* Branch Tracing is enabled.
* Force a taken branch signal.
*/
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
siginfo.si_code = TRAP_BRANCH;
force_sig_info(SIGTRAP, &siginfo, current);
} else if (ia64_psr(regs)->ss) {
/*
* Single Step is enabled.
* Force a trace signal.
*/
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
siginfo.si_code = TRAP_TRACE;
force_sig_info(SIGTRAP, &siginfo, current);
}
return rv;
}
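The return convention of ia64_emulate_brl() deserves a note: fkt is (unsigned long) -1 when the bundle was not a brl after all, 0 when the emulation is complete, and otherwise the address of one of the ia64_set_bN stubs, which must be entered with arg1 (the return IP) so it can patch the preserved branch register. A hedged sketch of how a caller might classify the result; the surrounding fault handler is an assumption, only the struct comes from this file, and the real consumer is the low-level fault-exit code, which branches to rv.fkt itself because C code cannot touch the caller's preserved registers:

static int classify_brl_result(struct illegal_op_return rv)
{
	if (rv.fkt == (unsigned long) -1)
		return 0;	/* not a brl: deliver SIGILL as usual */
	if (rv.fkt == 0)
		return 1;	/* fully handled, pt_regs already updated */
	return 2;		/* fixup stub at rv.fkt must run with rv.arg1 (= IP+16) */
}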
...@@ -5,15 +5,18 @@ ...@@ -5,15 +5,18 @@
* *
* Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Hewlett-Packard Co. * Copyright (C) 1999-2000 Hewlett-Packard Co.
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 1999-2000 Stephane Eranian <eranian@hpl.hp.com>
* *
* All EFI Runtime Services are not implemented yet as EFI only * All EFI Runtime Services are not implemented yet as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed * supports physical mode addressing on SoftSDV. This is to be fixed
* in a future version. --drummond 1999-07-20 * in a future version. --drummond 1999-07-20
* *
* Implemented EFI runtime services and virtual mode calls. --davidm * Implemented EFI runtime services and virtual mode calls. --davidm
*
* Goutham Rao: <goutham.rao@intel.com>
* Skip non-WB memory and ignore empty memory ranges.
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -22,6 +25,7 @@ ...@@ -22,6 +25,7 @@
#include <asm/efi.h> #include <asm/efi.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
#define EFI_DEBUG 0 #define EFI_DEBUG 0
...@@ -172,6 +176,14 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -172,6 +176,14 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
continue; continue;
} }
if (!(md->attribute & EFI_MEMORY_WB))
continue;
if (md->num_pages == 0) {
printk("efi_memmap_walk: ignoring empty region at 0x%lx",
md->phys_addr);
continue;
}
curr.start = PAGE_OFFSET + md->phys_addr; curr.start = PAGE_OFFSET + md->phys_addr;
curr.end = curr.start + (md->num_pages << 12); curr.end = curr.start + (md->num_pages << 12);
...@@ -207,6 +219,61 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -207,6 +219,61 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
} }
} }
/*
* Look for the PAL_CODE region reported by EFI and map it using an
* ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
* Abstraction Layer chapter 11 in ADAG
*/
static void
map_pal_code (void)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
u64 efi_desc_size;
int pal_code_count=0;
u64 mask, flags;
u64 vaddr;
efi_map_start = __va(ia64_boot_param.efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param.efi_memmap_size;
efi_desc_size = ia64_boot_param.efi_memdesc_size;
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
if (md->type != EFI_PAL_CODE) continue;
if (++pal_code_count > 1) {
printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
md->phys_addr);
continue;
}
mask = ~((1 << _PAGE_SIZE_4M)-1); /* XXX should be dynamic? */
vaddr = PAGE_OFFSET + md->phys_addr;
printk(__FUNCTION__": mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
md->phys_addr, md->phys_addr + (md->num_pages << 12),
vaddr & mask, (vaddr & mask) + 4*1024*1024);
/*
* Cannot write to CRx with PSR.ic=1
*/
ia64_clear_ic(flags);
/*
* ITR0/DTR0: used for kernel code/data
* ITR1/DTR1: used by HP simulator
* ITR2/DTR2: map PAL code
* ITR3/DTR3: used to map PAL calls buffer
*/
ia64_itr(0x1, 2, vaddr & mask,
pte_val(mk_pte_phys(md->phys_addr,
__pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX))),
_PAGE_SIZE_4M);
local_irq_restore(flags);
ia64_srlz_i ();
}
}
void __init void __init
efi_init (void) efi_init (void)
{ {
...@@ -291,6 +358,8 @@ efi_init (void) ...@@ -291,6 +358,8 @@ efi_init (void)
} }
} }
#endif #endif
map_pal_code();
} }
void void
......
/* /*
* EFI call stub. * EFI call stub.
* *
* Copyright (C) 1999 David Mosberger <davidm@hpl.hp.com> * Copyright (C) 1999-2000 Hewlett-Packard Co
* Copyright (C) 1999-2000 David Mosberger <davidm@hpl.hp.com>
* *
* This stub allows us to make EFI calls in physical mode with interrupts * This stub allows us to make EFI calls in physical mode with interrupts
* turned off. We need this because we can't call SetVirtualMap() until * turned off. We need this because we can't call SetVirtualMap() until
...@@ -30,6 +31,7 @@ ...@@ -30,6 +31,7 @@
(IA64_PSR_BN) (IA64_PSR_BN)
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/asmmacro.h>
.text .text
.psr abi64 .psr abi64
...@@ -38,53 +40,6 @@ ...@@ -38,53 +40,6 @@
.text .text
/*
* Switch execution mode from virtual to physical or vice versa.
*
* Inputs:
* r16 = new psr to establish
*/
.proc switch_mode
switch_mode:
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
;;
{
flushrs // must be first insn in group
srlz.i
shr.u r19=r15,61 // r19 <- top 3 bits of current IP
}
;;
mov cr.ipsr=r16 // set new PSR
add r3=1f-switch_mode,r15
xor r15=0x7,r19 // flip the region bits
mov r17=ar.bsp
mov r14=rp // get return address into a general register
// switch RSE backing store:
;;
dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
mov r18=ar.rnat // save ar.rnat
;;
mov ar.bspstore=r17 // this steps on ar.rnat
dep r3=r15,r3,61,3 // make rfi return address physical or virtual
;;
mov cr.iip=r3
mov cr.ifs=r0
dep sp=r15,sp,61,3 // make stack pointer physical or virtual
;;
mov ar.rnat=r18 // restore ar.rnat
dep r14=r15,r14,61,3 // make function return address physical or virtual
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.few rp
.endp switch_mode
/* /*
* Inputs: * Inputs:
* in0 = address of function descriptor of EFI routine to call * in0 = address of function descriptor of EFI routine to call
...@@ -94,13 +49,12 @@ switch_mode: ...@@ -94,13 +49,12 @@ switch_mode:
* r8 = EFI_STATUS returned by called function * r8 = EFI_STATUS returned by called function
*/ */
.global efi_call_phys GLOBAL_ENTRY(efi_call_phys)
.proc efi_call_phys UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
efi_call_phys: alloc loc1=ar.pfs,8,5,7,0
alloc loc0=ar.pfs,8,5,7,0
ld8 r2=[in0],8 // load EFI function's entry point ld8 r2=[in0],8 // load EFI function's entry point
mov loc1=rp mov loc0=rp
UNW(.body)
;; ;;
mov loc2=gp // save global pointer mov loc2=gp // save global pointer
mov loc4=ar.rsc // save RSE configuration mov loc4=ar.rsc // save RSE configuration
...@@ -121,7 +75,7 @@ efi_call_phys: ...@@ -121,7 +75,7 @@ efi_call_phys:
;; ;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
mov out3=in4 mov out3=in4
br.call.sptk.few rp=switch_mode br.call.sptk.few rp=ia64_switch_mode
.ret0: .ret0:
mov out4=in5 mov out4=in5
mov out5=in6 mov out5=in6
...@@ -130,12 +84,11 @@ efi_call_phys: ...@@ -130,12 +84,11 @@ efi_call_phys:
.ret1: .ret1:
mov ar.rsc=r0 // put RSE in enforced lazy, LE mode mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
mov r16=loc3 mov r16=loc3
br.call.sptk.few rp=switch_mode // return to virtual mode br.call.sptk.few rp=ia64_switch_mode // return to virtual mode
.ret2: .ret2:
mov ar.rsc=loc4 // restore RSE configuration mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc0 mov ar.pfs=loc1
mov rp=loc1 mov rp=loc0
mov gp=loc2 mov gp=loc2
br.ret.sptk.few rp br.ret.sptk.few rp
END(efi_call_phys)
.endp efi_call_phys
...@@ -2,7 +2,64 @@ ...@@ -2,7 +2,64 @@
* Preserved registers that are shared between code in ivt.S and entry.S. Be * Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these! * careful not to step on these!
*/ */
#define pEOI p1 /* should leave_kernel write EOI? */
#define pKern p2 /* will leave_kernel return to kernel-mode? */ #define pKern p2 /* will leave_kernel return to kernel-mode? */
#define pSys p4 /* are we processing a (synchronous) system call? */ #define pSys p4 /* are we processing a (synchronous) system call? */
#define pNonSys p5 /* complement of pSys */ #define pNonSys p5 /* complement of pSys */
#define PT(f) (IA64_PT_REGS_##f##_OFFSET + 16)
#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET + 16)
#define PT_REGS_SAVES(off) \
UNW(.unwabi @svr4, 'i'); \
UNW(.fframe IA64_PT_REGS_SIZE+16+(off)); \
UNW(.spillsp rp, PT(CR_IIP)+(off)); \
UNW(.spillsp ar.pfs, PT(CR_IFS)+(off)); \
UNW(.spillsp ar.unat, PT(AR_UNAT)+(off)); \
UNW(.spillsp ar.fpsr, PT(AR_FPSR)+(off)); \
UNW(.spillsp pr, PT(PR)+(off));
#define PT_REGS_UNWIND_INFO(off) \
UNW(.prologue); \
PT_REGS_SAVES(off); \
UNW(.body)
#define SWITCH_STACK_SAVES(off) \
UNW(.savesp ar.unat,SW(CALLER_UNAT)+(off)); UNW(.savesp ar.fpsr,SW(AR_FPSR)+(off)); \
UNW(.spillsp f2,SW(F2)+(off)); UNW(.spillsp f3,SW(F3)+(off)); \
UNW(.spillsp f4,SW(F4)+(off)); UNW(.spillsp f5,SW(F5)+(off)); \
UNW(.spillsp f16,SW(F16)+(off)); UNW(.spillsp f17,SW(F17)+(off)); \
UNW(.spillsp f18,SW(F18)+(off)); UNW(.spillsp f19,SW(F19)+(off)); \
UNW(.spillsp f20,SW(F20)+(off)); UNW(.spillsp f21,SW(F21)+(off)); \
UNW(.spillsp f22,SW(F22)+(off)); UNW(.spillsp f23,SW(F23)+(off)); \
UNW(.spillsp f24,SW(F24)+(off)); UNW(.spillsp f25,SW(F25)+(off)); \
UNW(.spillsp f26,SW(F26)+(off)); UNW(.spillsp f27,SW(F27)+(off)); \
UNW(.spillsp f28,SW(F28)+(off)); UNW(.spillsp f29,SW(F29)+(off)); \
UNW(.spillsp f30,SW(F30)+(off)); UNW(.spillsp f31,SW(F31)+(off)); \
UNW(.spillsp r4,SW(R4)+(off)); UNW(.spillsp r5,SW(R5)+(off)); \
UNW(.spillsp r6,SW(R6)+(off)); UNW(.spillsp r7,SW(R7)+(off)); \
UNW(.spillsp b0,SW(B0)+(off)); UNW(.spillsp b1,SW(B1)+(off)); \
UNW(.spillsp b2,SW(B2)+(off)); UNW(.spillsp b3,SW(B3)+(off)); \
UNW(.spillsp b4,SW(B4)+(off)); UNW(.spillsp b5,SW(B5)+(off)); \
UNW(.spillsp ar.pfs,SW(AR_PFS)+(off)); UNW(.spillsp ar.lc,SW(AR_LC)+(off)); \
UNW(.spillsp @priunat,SW(AR_UNAT)+(off)); \
UNW(.spillsp ar.rnat,SW(AR_RNAT)+(off)); UNW(.spillsp ar.bspstore,SW(AR_BSPSTORE)+(off)); \
UNW(.spillsp pr,SW(PR)+(off))
#define DO_SAVE_SWITCH_STACK \
movl r28=1f; \
;; \
.fframe IA64_SWITCH_STACK_SIZE; \
adds sp=-IA64_SWITCH_STACK_SIZE,sp; \
mov b7=r28; \
SWITCH_STACK_SAVES(0); \
br.cond.sptk.many save_switch_stack; \
1:
#define DO_LOAD_SWITCH_STACK(extra) \
movl r28=1f; \
;; \
mov b7=r28; \
br.cond.sptk.many load_switch_stack; \
1: UNW(.restore sp); \
extra; \
adds sp=IA64_SWITCH_STACK_SIZE,sp
...@@ -124,7 +124,18 @@ asm (" ...@@ -124,7 +124,18 @@ asm ("
.proc pal_emulator_static .proc pal_emulator_static
pal_emulator_static: pal_emulator_static:
mov r8=-1 mov r8=-1
cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
mov r9=256
;;
cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
(p6) br.cond.sptk.few static
;;
mov r9=512
;;
cmp.gtu p6,p7=r9,r28
(p6) br.cond.sptk.few stacked
;;
static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
(p7) br.cond.sptk.few 1f (p7) br.cond.sptk.few 1f
;; ;;
mov r8=0 /* status = 0 */ mov r8=0 /* status = 0 */
...@@ -157,7 +168,12 @@ asm (" ...@@ -157,7 +168,12 @@ asm ("
;; ;;
mov ar.lc=r9 mov ar.lc=r9
mov r8=r0 mov r8=r0
1: br.cond.sptk.few rp 1:
br.cond.sptk.few rp
stacked:
br.ret.sptk.few rp
.endp pal_emulator_static\n"); .endp pal_emulator_static\n");
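The new prologue of pal_emulator_static splits PAL procedure indices by calling convention: indices below 256 take the static-register path (the existing code at the `static' label), indices 256-511 are bounced at `stacked'. The same dispatch written out as C for readability; pal_emu_static()/pal_emu_stacked() are hypothetical stand-ins, and this is an illustration of the asm above, not a replacement for it:

/* 0 <= index < 256  : static-register calling convention
 * 256 <= index < 512: stacked-register calling convention */
static long pal_emu_dispatch(unsigned long index)
{
	if (index < 256)
		return pal_emu_static(index);
	if (index < 512)
		return pal_emu_stacked(index);
	return -1;	/* same status the stub preloads into r8 */
}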
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */ /* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
......
...@@ -3,10 +3,11 @@ ...@@ -3,10 +3,11 @@
* each task's text region. For now, it contains the signal * each task's text region. For now, it contains the signal
* trampoline code only. * trampoline code only.
* *
* Copyright (C) 1999 Hewlett-Packard Co * Copyright (C) 1999-2000 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#include <asm/asmmacro.h>
#include <asm/offsets.h> #include <asm/offsets.h>
#include <asm/sigcontext.h> #include <asm/sigcontext.h>
#include <asm/system.h> #include <asm/system.h>
...@@ -75,15 +76,12 @@ ...@@ -75,15 +76,12 @@
* [sp+16] = sigframe * [sp+16] = sigframe
*/ */
.global ia64_sigtramp GLOBAL_ENTRY(ia64_sigtramp)
.proc ia64_sigtramp
ia64_sigtramp:
ld8 r10=[r3],8 // get signal handler entry point ld8 r10=[r3],8 // get signal handler entry point
br.call.sptk.many rp=invoke_sighandler br.call.sptk.many rp=invoke_sighandler
.endp ia64_sigtramp END(ia64_sigtramp)
.proc invoke_sighandler ENTRY(invoke_sighandler)
invoke_sighandler:
ld8 gp=[r3] // get signal handler's global pointer ld8 gp=[r3] // get signal handler's global pointer
mov b6=r10 mov b6=r10
cover // push args in interrupted frame onto backing store cover // push args in interrupted frame onto backing store
...@@ -152,10 +150,9 @@ back_from_restore_rbs: ...@@ -152,10 +150,9 @@ back_from_restore_rbs:
ldf.fill f15=[base1],32 ldf.fill f15=[base1],32
mov r15=__NR_rt_sigreturn mov r15=__NR_rt_sigreturn
break __BREAK_SYSCALL break __BREAK_SYSCALL
.endp invoke_sighandler END(invoke_sighandler)
.proc setup_rbs ENTRY(setup_rbs)
setup_rbs:
flushrs // must be first in insn flushrs // must be first in insn
mov ar.rsc=r0 // put RSE into enforced lazy mode mov ar.rsc=r0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
...@@ -167,9 +164,9 @@ setup_rbs: ...@@ -167,9 +164,9 @@ setup_rbs:
mov ar.rsc=0xf // set RSE into eager mode, pl 3 mov ar.rsc=0xf // set RSE into eager mode, pl 3
invala // invalidate ALAT invala // invalidate ALAT
br.cond.sptk.many back_from_setup_rbs br.cond.sptk.many back_from_setup_rbs
END(setup_rbs)
.proc restore_rbs ENTRY(restore_rbs)
restore_rbs:
flushrs flushrs
mov ar.rsc=r0 // put RSE into enforced lazy mode mov ar.rsc=r0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
...@@ -181,5 +178,4 @@ restore_rbs: ...@@ -181,5 +178,4 @@ restore_rbs:
mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc) mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
// invala not necessary as that will happen when returning to user-mode // invala not necessary as that will happen when returning to user-mode
br.cond.sptk.many back_from_restore_rbs br.cond.sptk.many back_from_restore_rbs
END(restore_rbs)
.endp restore_rbs
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/pal.h> #include <asm/pal.h>
#include <asm/offsets.h> #include <asm/offsets.h>
...@@ -54,10 +55,12 @@ halt_msg: ...@@ -54,10 +55,12 @@ halt_msg:
stringz "Halting kernel\n" stringz "Halting kernel\n"
.text .text
.align 16
.global _start GLOBAL_ENTRY(_start)
.proc _start UNW(.prologue)
_start: UNW(.save rp, r4) // terminate unwind chain with a NULL rp
UNW(mov r4=r0)
UNW(.body)
// set IVT entry point---can't access I/O ports without it // set IVT entry point---can't access I/O ports without it
movl r3=ia64_ivt movl r3=ia64_ivt
;; ;;
...@@ -156,12 +159,9 @@ alive_msg: ...@@ -156,12 +159,9 @@ alive_msg:
ld8 out0=[r2] ld8 out0=[r2]
br.call.sptk.few b0=console_print br.call.sptk.few b0=console_print
self: br.sptk.few self // endless loop self: br.sptk.few self // endless loop
.endp _start END(_start)
.align 16 GLOBAL_ENTRY(ia64_save_debug_regs)
.global ia64_save_debug_regs
.proc ia64_save_debug_regs
ia64_save_debug_regs:
alloc r16=ar.pfs,1,0,0,0 alloc r16=ar.pfs,1,0,0,0
mov r20=ar.lc // preserve ar.lc mov r20=ar.lc // preserve ar.lc
mov ar.lc=IA64_NUM_DBG_REGS-1 mov ar.lc=IA64_NUM_DBG_REGS-1
...@@ -177,13 +177,10 @@ ia64_save_debug_regs: ...@@ -177,13 +177,10 @@ ia64_save_debug_regs:
br.cloop.sptk.few 1b br.cloop.sptk.few 1b
;; ;;
mov ar.lc=r20 // restore ar.lc mov ar.lc=r20 // restore ar.lc
br.ret.sptk.few b0 br.ret.sptk.few rp
.endp ia64_save_debug_regs END(ia64_save_debug_regs)
.align 16 GLOBAL_ENTRY(ia64_load_debug_regs)
.global ia64_load_debug_regs
.proc ia64_load_debug_regs
ia64_load_debug_regs:
alloc r16=ar.pfs,1,0,0,0 alloc r16=ar.pfs,1,0,0,0
lfetch.nta [in0] lfetch.nta [in0]
mov r20=ar.lc // preserve ar.lc mov r20=ar.lc // preserve ar.lc
...@@ -200,13 +197,10 @@ ia64_load_debug_regs: ...@@ -200,13 +197,10 @@ ia64_load_debug_regs:
br.cloop.sptk.few 1b br.cloop.sptk.few 1b
;; ;;
mov ar.lc=r20 // restore ar.lc mov ar.lc=r20 // restore ar.lc
br.ret.sptk.few b0 br.ret.sptk.few rp
.endp ia64_load_debug_regs END(ia64_load_debug_regs)
.align 16 GLOBAL_ENTRY(__ia64_save_fpu)
.global __ia64_save_fpu
.proc __ia64_save_fpu
__ia64_save_fpu:
alloc r2=ar.pfs,1,0,0,0 alloc r2=ar.pfs,1,0,0,0
adds r3=16,in0 adds r3=16,in0
;; ;;
...@@ -354,12 +348,9 @@ __ia64_save_fpu: ...@@ -354,12 +348,9 @@ __ia64_save_fpu:
stf.spill.nta [in0]=f126,32 stf.spill.nta [in0]=f126,32
stf.spill.nta [ r3]=f127,32 stf.spill.nta [ r3]=f127,32
br.ret.sptk.few rp br.ret.sptk.few rp
.endp __ia64_save_fpu END(__ia64_save_fpu)
.align 16 GLOBAL_ENTRY(__ia64_load_fpu)
.global __ia64_load_fpu
.proc __ia64_load_fpu
__ia64_load_fpu:
alloc r2=ar.pfs,1,0,0,0 alloc r2=ar.pfs,1,0,0,0
adds r3=16,in0 adds r3=16,in0
;; ;;
...@@ -507,12 +498,9 @@ __ia64_load_fpu: ...@@ -507,12 +498,9 @@ __ia64_load_fpu:
ldf.fill.nta f126=[in0],32 ldf.fill.nta f126=[in0],32
ldf.fill.nta f127=[ r3],32 ldf.fill.nta f127=[ r3],32
br.ret.sptk.few rp br.ret.sptk.few rp
.endp __ia64_load_fpu END(__ia64_load_fpu)
.align 16 GLOBAL_ENTRY(__ia64_init_fpu)
.global __ia64_init_fpu
.proc __ia64_init_fpu
__ia64_init_fpu:
alloc r2=ar.pfs,0,0,0,0 alloc r2=ar.pfs,0,0,0,0
stf.spill [sp]=f0 stf.spill [sp]=f0
mov f32=f0 mov f32=f0
...@@ -644,4 +632,74 @@ __ia64_init_fpu: ...@@ -644,4 +632,74 @@ __ia64_init_fpu:
ldf.fill f126=[sp] ldf.fill f126=[sp]
mov f127=f0 mov f127=f0
br.ret.sptk.few rp br.ret.sptk.few rp
.endp __ia64_init_fpu END(__ia64_init_fpu)
/*
* Switch execution mode from virtual to physical or vice versa.
*
* Inputs:
* r16 = new psr to establish
*
* Note: RSE must already be in enforced lazy mode
*/
GLOBAL_ENTRY(ia64_switch_mode)
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
;;
{
flushrs // must be first insn in group
srlz.i
shr.u r19=r15,61 // r19 <- top 3 bits of current IP
}
;;
mov cr.ipsr=r16 // set new PSR
add r3=1f-ia64_switch_mode,r15
xor r15=0x7,r19 // flip the region bits
mov r17=ar.bsp
mov r14=rp // get return address into a general register
// switch RSE backing store:
;;
dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
mov r18=ar.rnat // save ar.rnat
;;
mov ar.bspstore=r17 // this steps on ar.rnat
dep r3=r15,r3,61,3 // make rfi return address physical or virtual
;;
mov cr.iip=r3
mov cr.ifs=r0
dep sp=r15,sp,61,3 // make stack pointer physical or virtual
;;
mov ar.rnat=r18 // restore ar.rnat
dep r14=r15,r14,61,3 // make function return address physical or virtual
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.few rp
END(ia64_switch_mode)
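All of the address rewriting in ia64_switch_mode comes down to the "dep reg=r15,reg,61,3" pattern: deposit new region bits into bits 61-63 of ip, sp, ar.bsp and the return address, where r15 holds the current region bits xor'ed with 0x7. A small stand-alone C illustration of that bit manipulation (the helper name is made up):

/* Illustrative only: the effect of "dep r17=r15,r17,61,3" and friends. */
static unsigned long deposit_region_bits (unsigned long addr, unsigned long region)
{
        return (addr & ~(0x7UL << 61)) | ((region & 0x7UL) << 61);
}

/* e.g. new_sp = deposit_region_bits(sp, 0x7 ^ (ip >> 61)); makes the
 * stack pointer "physical or virtual", as the comments above put it,
 * without changing the low 61 address bits.
 */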
#ifdef CONFIG_IA64_BRL_EMU
/*
* Assembly routines used by brl_emu.c to set preserved register state.
*/
#define SET_REG(reg) \
GLOBAL_ENTRY(ia64_set_##reg); \
alloc r16=ar.pfs,1,0,0,0; \
mov reg=r32; \
;; \
br.ret.sptk rp; \
END(ia64_set_##reg)
SET_REG(b1);
SET_REG(b2);
SET_REG(b3);
SET_REG(b4);
SET_REG(b5);
#endif /* CONFIG_IA64_BRL_EMU */
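SET_REG only generates the five tiny setters ia64_set_b1 .. ia64_set_b5; the C side that chooses among them lives in brl_emu.c and is not part of this hunk. A purely hypothetical dispatch, to show how the generated entry points are meant to be called (the prototypes and the switch below are assumptions, not the emulator's actual code):

/* Hypothetical caller sketch -- not the real brl_emu.c. */
extern void ia64_set_b1 (unsigned long val);
extern void ia64_set_b2 (unsigned long val);
extern void ia64_set_b3 (unsigned long val);
extern void ia64_set_b4 (unsigned long val);
extern void ia64_set_b5 (unsigned long val);

static void set_preserved_branch_reg (int b, unsigned long val)
{
        switch (b) {
              case 1: ia64_set_b1(val); break;
              case 2: ia64_set_b2(val); break;
              case 3: ia64_set_b3(val); break;
              case 4: ia64_set_b4(val); break;
              case 5: ia64_set_b5(val); break;
        }
}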
/*
* Architecture-specific kernel symbols
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strtok);
#include <linux/pci.h>
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
#include <linux/in6.h>
#include <asm/checksum.h>
EXPORT_SYMBOL(csum_partial_copy_nocheck);
#include <asm/irq.h>
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
#include <asm/current.h>
#include <asm/hardirq.h>
EXPORT_SYMBOL(irq_stat);
#include <asm/processor.h>
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(kernel_thread);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
#include <asm/smplock.h>
EXPORT_SYMBOL(kernel_flag);
#include <asm/system.h>
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
#endif
#include <asm/uaccess.h>
EXPORT_SYMBOL(__copy_user);
#include <asm/unistd.h>
EXPORT_SYMBOL(__ia64_syscall);
/* from arch/ia64/lib */
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __moddi3(void);
extern void __umoddi3(void);
EXPORT_SYMBOL_NOVERS(__divdi3);
EXPORT_SYMBOL_NOVERS(__udivdi3);
EXPORT_SYMBOL_NOVERS(__moddi3);
EXPORT_SYMBOL_NOVERS(__umoddi3);
...@@ -201,10 +201,14 @@ static void show(char * str) ...@@ -201,10 +201,14 @@ static void show(char * str)
printk(" %d",local_bh_count(i)); printk(" %d",local_bh_count(i));
printk(" ]\nStack dumps:"); printk(" ]\nStack dumps:");
#ifdef __ia64__ #if defined(__ia64__)
printk(" ]\nStack dumps: <unimplemented on IA-64---please fix me>"); /*
/* for now we don't have stack dumping support... */ * We can't unwind the stack of another CPU without access to
#elif __i386__ * the registers of that CPU. And sending an IPI when we're
* in a potentially wedged state doesn't sound like a smart
* idea.
*/
#elif defined(__i386__)
for(i=0;i< smp_num_cpus;i++) { for(i=0;i< smp_num_cpus;i++) {
unsigned long esp; unsigned long esp;
if(i==cpu) if(i==cpu)
...@@ -227,9 +231,7 @@ static void show(char * str) ...@@ -227,9 +231,7 @@ static void show(char * str)
You lose... You lose...
#endif #endif
printk("\nCPU %d:",cpu); printk("\nCPU %d:",cpu);
#ifdef __i386__
show_stack(NULL); show_stack(NULL);
#endif
printk("\n"); printk("\n");
} }
...@@ -582,7 +584,8 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs) ...@@ -582,7 +584,8 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action; action = desc->action;
status &= ~IRQ_PENDING; /* we commit to handling */ status &= ~IRQ_PENDING; /* we commit to handling */
status |= IRQ_INPROGRESS; /* we are handling it */ if (!(status & IRQ_PER_CPU))
status |= IRQ_INPROGRESS; /* we are handling it */
} }
desc->status = status; desc->status = status;
......
...@@ -33,7 +33,9 @@ ...@@ -33,7 +33,9 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/system.h> #include <asm/system.h>
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC #define IRQ_DEBUG 0
#ifdef CONFIG_ITANIUM_A1_SPECIFIC
spinlock_t ivr_read_lock; spinlock_t ivr_read_lock;
#endif #endif
...@@ -49,7 +51,7 @@ __u8 isa_irq_to_vector_map[16] = { ...@@ -49,7 +51,7 @@ __u8 isa_irq_to_vector_map[16] = {
0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x40, 0x41 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x40, 0x41
}; };
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC #ifdef CONFIG_ITANIUM_A1_SPECIFIC
int usbfix; int usbfix;
...@@ -63,7 +65,7 @@ usbfix_option (char *str) ...@@ -63,7 +65,7 @@ usbfix_option (char *str)
__setup("usbfix", usbfix_option); __setup("usbfix", usbfix_option);
#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ #endif /* CONFIG_ITANIUM_A1_SPECIFIC */
/* /*
* That's where the IVT branches when we get an external * That's where the IVT branches when we get an external
...@@ -73,13 +75,8 @@ __setup("usbfix", usbfix_option); ...@@ -73,13 +75,8 @@ __setup("usbfix", usbfix_option);
void void
ia64_handle_irq (unsigned long vector, struct pt_regs *regs) ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
{ {
unsigned long bsp, sp, saved_tpr; unsigned long saved_tpr;
#ifdef CONFIG_ITANIUM_A1_SPECIFIC
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
# ifndef CONFIG_SMP
static unsigned int max_prio = 0;
unsigned int prev_prio;
# endif
unsigned long eoi_ptr; unsigned long eoi_ptr;
# ifdef CONFIG_USB # ifdef CONFIG_USB
...@@ -95,18 +92,14 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs) ...@@ -95,18 +92,14 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
spin_lock(&ivr_read_lock); spin_lock(&ivr_read_lock);
{ {
unsigned int tmp; unsigned int tmp;
/* /*
* Disable PCI writes * Disable PCI writes
*/ */
outl(0x80ff81c0, 0xcf8); outl(0x80ff81c0, 0xcf8);
tmp = inl(0xcfc); tmp = inl(0xcfc);
outl(tmp | 0x400, 0xcfc); outl(tmp | 0x400, 0xcfc);
eoi_ptr = inl(0xcfc); eoi_ptr = inl(0xcfc);
vector = ia64_get_ivr(); vector = ia64_get_ivr();
/* /*
* Enable PCI writes * Enable PCI writes
*/ */
...@@ -118,75 +111,61 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs) ...@@ -118,75 +111,61 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
if (usbfix) if (usbfix)
reenable_usb(); reenable_usb();
# endif # endif
#endif /* CONFIG_ITANIUM_A1_SPECIFIC */
# ifndef CONFIG_SMP #if IRQ_DEBUG
prev_prio = max_prio; {
if (vector < max_prio) { unsigned long bsp, sp;
printk ("ia64_handle_irq: got vector %lu while %u was in progress!\n",
vector, max_prio); asm ("mov %0=ar.bsp" : "=r"(bsp));
asm ("mov %0=sp" : "=r"(sp));
} else
max_prio = vector; if ((sp - bsp) < 1024) {
# endif /* !CONFIG_SMP */ static unsigned char count;
#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ static long last_time;
if (count > 5 && jiffies - last_time > 5*HZ)
count = 0;
if (++count < 5) {
last_time = jiffies;
printk("ia64_handle_irq: DANGER: less than "
"1KB of free stack space!!\n"
"(bsp=0x%lx, sp=%lx)\n", bsp, sp);
}
}
}
#endif /* IRQ_DEBUG */
/* /*
* Always set TPR to limit maximum interrupt nesting depth to * Always set TPR to limit maximum interrupt nesting depth to
* 16 (without this, it would be ~240, which could easily lead * 16 (without this, it would be ~240, which could easily lead
* to kernel stack overflows. * to kernel stack overflows).
*/ */
saved_tpr = ia64_get_tpr(); saved_tpr = ia64_get_tpr();
ia64_srlz_d(); ia64_srlz_d();
ia64_set_tpr(vector); do {
ia64_srlz_d(); if (vector >= NR_IRQS) {
printk("handle_irq: invalid vector %lu\n", vector);
asm ("mov %0=ar.bsp" : "=r"(bsp)); ia64_set_tpr(saved_tpr);
asm ("mov %0=sp" : "=r"(sp)); ia64_srlz_d();
return;
if ((sp - bsp) < 1024) {
static long last_time;
static unsigned char count;
if (count > 5 && jiffies - last_time > 5*HZ)
count = 0;
if (++count < 5) {
last_time = jiffies;
printk("ia64_handle_irq: DANGER: less than 1KB of free stack space!!\n"
"(bsp=0x%lx, sp=%lx)\n", bsp, sp);
} }
} ia64_set_tpr(vector);
ia64_srlz_d();
/* do_IRQ(vector, regs);
* The interrupt is now said to be in service
*/
if (vector >= NR_IRQS) {
printk("handle_irq: invalid vector %lu\n", vector);
goto out;
}
do_IRQ(vector, regs);
out:
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
{
long pEOI;
asm ("mov %0=0;; (p1) mov %0=1" : "=r"(pEOI));
if (!pEOI) {
printk("Yikes: ia64_handle_irq() without pEOI!!\n");
asm volatile ("cmp.eq p1,p0=r0,r0" : "=r"(pEOI));
}
}
local_irq_disable();
# ifndef CONFIG_SMP
if (max_prio == vector)
max_prio = prev_prio;
# endif /* !CONFIG_SMP */
#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
ia64_srlz_d(); /*
ia64_set_tpr(saved_tpr); * Disable interrupts and send EOI:
ia64_srlz_d(); */
local_irq_disable();
ia64_set_tpr(saved_tpr);
ia64_eoi();
#ifdef CONFIG_ITANIUM_A1_SPECIFIC
break;
#endif
vector = ia64_get_ivr();
} while (vector != IA64_SPURIOUS_INT);
} }
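The comment about limiting nesting to 16 relies on how TPR masks external interrupts: the 256 vectors are grouped into 16 priority classes by their top nibble, and writing the in-service vector into TPR masks everything at the same or lower class, so only strictly higher classes can nest. A one-line C illustration of the grouping (helper name invented):

/* IA-64 groups the 256 external vectors into 16 priority classes. */
static inline unsigned int vector_priority_class (unsigned int vector)
{
        return vector >> 4;     /* class = top nibble of the vector */
}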
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -210,12 +189,12 @@ init_IRQ (void) ...@@ -210,12 +189,12 @@ init_IRQ (void)
ia64_set_lrr0(0, 1); ia64_set_lrr0(0, 1);
ia64_set_lrr1(0, 1); ia64_set_lrr1(0, 1);
irq_desc[TIMER_IRQ].handler = &irq_type_ia64_sapic;
irq_desc[IA64_SPURIOUS_INT].handler = &irq_type_ia64_sapic; irq_desc[IA64_SPURIOUS_INT].handler = &irq_type_ia64_sapic;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
* Configure the IPI vector and handler * Configure the IPI vector and handler
*/ */
irq_desc[IPI_IRQ].status |= IRQ_PER_CPU;
irq_desc[IPI_IRQ].handler = &irq_type_ia64_sapic; irq_desc[IPI_IRQ].handler = &irq_type_ia64_sapic;
setup_irq(IPI_IRQ, &ipi_irqaction); setup_irq(IPI_IRQ, &ipi_irqaction);
#endif #endif
...@@ -234,7 +213,7 @@ ipi_send (int cpu, int vector, int delivery_mode, int redirect) ...@@ -234,7 +213,7 @@ ipi_send (int cpu, int vector, int delivery_mode, int redirect)
{ {
unsigned long ipi_addr; unsigned long ipi_addr;
unsigned long ipi_data; unsigned long ipi_data;
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC #ifdef CONFIG_ITANIUM_A1_SPECIFIC
unsigned long flags; unsigned long flags;
#endif #endif
# define EID 0 # define EID 0
...@@ -242,13 +221,13 @@ ipi_send (int cpu, int vector, int delivery_mode, int redirect) ...@@ -242,13 +221,13 @@ ipi_send (int cpu, int vector, int delivery_mode, int redirect)
ipi_data = (delivery_mode << 8) | (vector & 0xff); ipi_data = (delivery_mode << 8) | (vector & 0xff);
ipi_addr = ipi_base_addr | ((cpu << 8 | EID) << 4) | ((redirect & 1) << 3); ipi_addr = ipi_base_addr | ((cpu << 8 | EID) << 4) | ((redirect & 1) << 3);
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC #ifdef CONFIG_ITANIUM_A1_SPECIFIC
spin_lock_irqsave(&ivr_read_lock, flags); spin_lock_irqsave(&ivr_read_lock, flags);
#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ #endif
writeq(ipi_data, ipi_addr); writeq(ipi_data, ipi_addr);
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC #ifdef CONFIG_ITANIUM_A1_SPECIFIC
spin_unlock_irqrestore(&ivr_read_lock, flags); spin_unlock_irqrestore(&ivr_read_lock, flags);
#endif #endif
} }
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/config.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp kstack, // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp kstack,
// switch modes, jump to C INIT handler // switch modes, jump to C INIT handler
// //
#include <asm/offsets.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/mca_asm.h> #include <asm/mca_asm.h>
......
...@@ -101,7 +101,6 @@ ...@@ -101,7 +101,6 @@
;; \ ;; \
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \ st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
st8.spill [r17]=rR1,16; /* save original r1 */ \ st8.spill [r17]=rR1,16; /* save original r1 */ \
cmp.ne pEOI,p0=r0,r0 /* clear pEOI by default */ \
;; \ ;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \ .mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \ .mem.offset 8,0; st8.spill [r17]=r3,16; \
......
...@@ -4,9 +4,16 @@ ...@@ -4,9 +4,16 @@
* *
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 David Mosberger <davidm@hpl.hp.com> * Copyright (C) 1999-2000 David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
*
* 05/22/2000 eranian Added support for stacked register calls
* 05/24/2000 eranian Added support for physical mode static calls
*/ */
#include <asm/asmmacro.h>
#include <asm/processor.h>
.text .text
.psr abi64 .psr abi64
.psr lsb .psr lsb
...@@ -24,29 +31,23 @@ pal_entry_point: ...@@ -24,29 +31,23 @@ pal_entry_point:
* *
* in0 Address of the PAL entry point (text address, NOT a function descriptor). * in0 Address of the PAL entry point (text address, NOT a function descriptor).
*/ */
.align 16 GLOBAL_ENTRY(ia64_pal_handler_init)
.global ia64_pal_handler_init
.proc ia64_pal_handler_init
ia64_pal_handler_init:
alloc r3=ar.pfs,1,0,0,0 alloc r3=ar.pfs,1,0,0,0
movl r2=pal_entry_point movl r2=pal_entry_point
;; ;;
st8 [r2]=in0 st8 [r2]=in0
br.ret.sptk.few rp br.ret.sptk.few rp
END(ia64_pal_handler_init)
.endp ia64_pal_handler_init
/* /*
* Default PAL call handler. This needs to be coded in assembly because it uses * Default PAL call handler. This needs to be coded in assembly because it uses
* the static calling convention, i.e., the RSE may not be used and calls are * the static calling convention, i.e., the RSE may not be used and calls are
* done via "br.cond" (not "br.call"). * done via "br.cond" (not "br.call").
*/ */
.align 16 GLOBAL_ENTRY(ia64_pal_default_handler)
.global ia64_pal_default_handler
.proc ia64_pal_default_handler
ia64_pal_default_handler:
mov r8=-1 mov r8=-1
br.cond.sptk.few rp br.cond.sptk.few rp
END(ia64_pal_default_handler)
/* /*
* Make a PAL call using the static calling convention. * Make a PAL call using the static calling convention.
...@@ -56,64 +57,139 @@ ia64_pal_default_handler: ...@@ -56,64 +57,139 @@ ia64_pal_default_handler:
* in2 - in4 Remaining PAL arguments * in2 - in4 Remaining PAL arguments
* *
*/ */
GLOBAL_ENTRY(ia64_pal_call_static)
UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6))
alloc loc1 = ar.pfs,6,90,0,0
movl loc2 = pal_entry_point
1: {
mov r28 = in0
mov r29 = in1
mov r8 = ip
}
;;
ld8 loc2 = [loc2] // loc2 <- entry point
mov r30 = in2
mov r31 = in3
;;
mov loc3 = psr
mov loc0 = rp
UNW(.body)
adds r8 = .ret0-1b,r8
;;
rsm psr.i
mov b7 = loc2
mov rp = r8
;;
br.cond.sptk.few b7
.ret0: mov psr.l = loc3
mov ar.pfs = loc1
mov rp = loc0
;;
srlz.d // serialize restoration of psr.l
br.ret.sptk.few b0
END(ia64_pal_call_static)
#ifdef __GCC_MULTIREG_RETVALS__ /*
# define arg0 in0 * Make a PAL call using the stacked registers calling convention.
# define arg1 in1 *
# define arg2 in2 * Inputs:
# define arg3 in3 * in0 Index of PAL service
# define arg4 in4 * in2 - in3 Remaining PAL arguments
#else */
# define arg0 in1 GLOBAL_ENTRY(ia64_pal_call_stacked)
# define arg1 in2 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5))
# define arg2 in3 alloc loc1 = ar.pfs,5,4,87,0
# define arg3 in4 movl loc2 = pal_entry_point
# define arg4 in5
#endif
.text mov r28 = in0 // Index MUST be copied to r28
.psr abi64 mov out0 = in0 // AND in0 of PAL function
.psr lsb mov loc0 = rp
.lsb UNW(.body)
;;
ld8 loc2 = [loc2] // loc2 <- entry point
mov out1 = in1
mov out2 = in2
mov out3 = in3
mov loc3 = psr
;;
rsm psr.i
mov b7 = loc2
;;
br.call.sptk.many rp=b7 // now make the call
.ret2:
mov psr.l = loc3
mov ar.pfs = loc1
mov rp = loc0
;;
srlz.d // serialize restoration of psr.l
br.ret.sptk.few b0
END(ia64_pal_call_stacked)
/*
* Make a physical mode PAL call using the static registers calling convention.
*
* Inputs:
* in0 Index of PAL service
* in2 - in3 Remaining PAL arguments *
*
* PSR_DB, PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
* So we don't need to clear them.
*/
#define PAL_PSR_BITS_TO_CLEAR \
(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
IA64_PSR_DFL | IA64_PSR_DFH)
#define PAL_PSR_BITS_TO_SET \
(IA64_PSR_BN)
.align 16
.global ia64_pal_call_static GLOBAL_ENTRY(ia64_pal_call_phys_static)
.proc ia64_pal_call_static UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6))
ia64_pal_call_static: alloc loc1 = ar.pfs,6,90,0,0
alloc loc0 = ar.pfs,6,90,0,0 movl loc2 = pal_entry_point
movl loc2 = pal_entry_point
1: { 1: {
mov r28 = arg0 mov r28 = in0 // copy procedure index
mov r29 = arg1 mov r8 = ip // save ip to compute branch
mov r8 = ip mov loc0 = rp // save rp
} }
UNW(.body)
;; ;;
ld8 loc2 = [loc2] // loc2 <- entry point ld8 loc2 = [loc2] // loc2 <- entry point
mov r30 = arg2 mov r29 = in1 // first argument
mov r31 = arg3 mov r30 = in2 // copy arg2
mov r31 = in3 // copy arg3
;; ;;
mov loc3 = psr mov loc3 = psr // save psr
mov loc1 = rp adds r8 = .ret4-1b,r8 // calculate return address for call
adds r8 = .ret0-1b,r8
;;
rsm psr.i
mov b7 = loc2
mov rp = r8
;; ;;
br.cond.sptk.few b7 mov loc4=ar.rsc // save RSE configuration
.ret0: mov psr.l = loc3 dep.z loc2=loc2,0,61 // convert pal entry point to physical
#ifndef __GCC_MULTIREG_RETVALS__ dep.z r8=r8,0,61 // convert rp to physical
st8 [in0] = r8, 8
;; ;;
st8 [in0] = r9, 8 mov b7 = loc2 // install target to branch reg
mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
movl r16=PAL_PSR_BITS_TO_CLEAR
movl r17=PAL_PSR_BITS_TO_SET
;; ;;
st8 [in0] = r10, 8 or loc3=loc3,r17 // add in psr the bits to set
;; ;;
st8 [in0] = r11, 8 andcm r16=loc3,r16 // removes bits to clear from psr
#endif br.call.sptk.few rp=ia64_switch_mode
mov ar.pfs = loc0 .ret3:
mov rp = loc1 mov rp = r8 // install return address (physical)
br.cond.sptk.few b7
.ret4:
mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
br.call.sptk.few rp=ia64_switch_mode // return to virtual mode
.ret5: mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
mov rp = loc0
;; ;;
mov ar.rsc=loc4 // restore RSE configuration
srlz.d // serialize restoration of psr.l srlz.d // serialize restoration of psr.l
br.ret.sptk.few b0 br.ret.sptk.few b0
.endp ia64_pal_call_static END(ia64_pal_call_phys_static)
...@@ -23,8 +23,8 @@ pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle ...@@ -23,8 +23,8 @@ pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle
void *ret; void *ret;
int gfp = GFP_ATOMIC; int gfp = GFP_ATOMIC;
if (!hwdev || hwdev->dma_mask != 0xffffffff) if (!hwdev || hwdev->dma_mask == 0xffffffff)
gfp |= GFP_DMA; gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
ret = (void *)__get_free_pages(gfp, get_order(size)); ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret) { if (ret) {
......
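For reference, the interface being tweaked here is the usual 2.4 consistent-DMA pair; a minimal driver-side usage sketch (pdev and NBYTES are placeholders for a real device and size):

#include <linux/pci.h>
#include <linux/errno.h>

#define NBYTES  4096            /* placeholder buffer size */

static void *buf;
static dma_addr_t buf_dma;

static int setup_dma_buffer (struct pci_dev *pdev)
{
        buf = pci_alloc_consistent(pdev, NBYTES, &buf_dma);
        if (!buf)
                return -ENOMEM;
        /* hand buf_dma to the device, access the memory through buf */
        return 0;
}

static void teardown_dma_buffer (struct pci_dev *pdev)
{
        pci_free_consistent(pdev, NBYTES, buf, buf_dma);
}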
...@@ -133,7 +133,7 @@ pci_find_bios(void) ...@@ -133,7 +133,7 @@ pci_find_bios(void)
* Initialization. Uses the SAL interface * Initialization. Uses the SAL interface
*/ */
#define PCI_BUSSES_TO_SCAN 2 /* On "real" ;) hardware this will be 255 */ #define PCI_BUSES_TO_SCAN 255
void __init void __init
pcibios_init(void) pcibios_init(void)
...@@ -147,7 +147,7 @@ pcibios_init(void) ...@@ -147,7 +147,7 @@ pcibios_init(void)
} }
printk("PCI: Probing PCI hardware\n"); printk("PCI: Probing PCI hardware\n");
for (i = 0; i < PCI_BUSSES_TO_SCAN; i++) for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
pci_scan_bus(i, ops, NULL); pci_scan_bus(i, ops, NULL);
platform_pci_fixup(); platform_pci_fixup();
return; return;
...@@ -197,7 +197,7 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r ...@@ -197,7 +197,7 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
ranges->mem_end -= bus->resource[1]->start; ranges->mem_end -= bus->resource[1]->start;
} }
int __init int
pcibios_enable_device (struct pci_dev *dev) pcibios_enable_device (struct pci_dev *dev)
{ {
/* Not needed, since we enable all devices at startup. */ /* Not needed, since we enable all devices at startup. */
......
/*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef __GCC_MULTIREG_RETVALS__
/*
* gcc currently does not conform to the ia-64 calling
* convention as far as returning function values are
* concerned. Instead of returning values up to 32 bytes in
* size in r8-r11, gcc returns any value bigger than a
* doubleword via a structure that's allocated by the caller
* and whose address is passed into the function. Since
* SAL_PROC returns values according to the calling
* convention, this stub takes care of copying r8-r11 to the
* place where gcc expects them.
*/
.text
.psr abi64
.psr lsb
.lsb
.align 16
.global ia64_sal_stub
ia64_sal_stub:
/*
* Sheesh, the Cygnus backend passes the pointer to a return value structure in
* in0 whereas the HP backend passes it in r8. Don't you hate those little
* differences...
*/
#ifdef GCC_RETVAL_POINTER_IN_R8
adds r2=-24,sp
adds sp=-48,sp
mov r14=rp
;;
st8 [r2]=r8,8 // save pointer to return value
addl r3=@ltoff(ia64_sal),gp
;;
ld8 r3=[r3]
st8 [r2]=gp,8 // save global pointer
;;
ld8 r3=[r3] // fetch the value of ia64_sal
st8 [r2]=r14 // save return pointer
;;
ld8 r2=[r3],8 // load function's entry point
;;
ld8 gp=[r3] // load function's global pointer
;;
mov b6=r2
br.call.sptk.few rp=b6
.ret0: adds r2=24,sp
;;
ld8 r3=[r2],8 // restore pointer to return value
;;
ld8 gp=[r2],8 // restore global pointer
st8 [r3]=r8,8
;;
ld8 r14=[r2] // restore return pointer
st8 [r3]=r9,8
;;
mov rp=r14
st8 [r3]=r10,8
;;
st8 [r3]=r11,8
adds sp=48,sp
br.sptk.few rp
#else
/*
* On input:
* in0 = pointer to return value structure
* in1 = index of SAL function to call
* in2..inN = remaining args to SAL call
*/
/*
* We allocate one input and eight output register such that the br.call instruction
* will rename in1-in7 to in0-in6---exactly what we want because SAL doesn't want to
* see the pointer to the return value structure.
*/
alloc r15=ar.pfs,1,0,8,0
adds r2=-24,sp
adds sp=-48,sp
mov r14=rp
;;
st8 [r2]=r15,8 // save ar.pfs
addl r3=@ltoff(ia64_sal),gp
;;
ld8 r3=[r3] // get address of ia64_sal
st8 [r2]=gp,8 // save global pointer
;;
ld8 r3=[r3] // get value of ia64_sal
st8 [r2]=r14,8 // save return address (rp)
;;
ld8 r2=[r3],8 // load function's entry point
;;
ld8 gp=[r3] // load function's global pointer
mov b6=r2
br.call.sptk.few rp=b6 // make SAL call
.ret0: adds r2=24,sp
;;
ld8 r15=[r2],8 // restore ar.pfs
;;
ld8 gp=[r2],8 // restore global pointer
st8 [in0]=r8,8 // store 1. dword of return value
;;
ld8 r14=[r2] // restore return address (rp)
st8 [in0]=r9,8 // store 2. dword of return value
;;
mov rp=r14
st8 [in0]=r10,8 // store 3. dword of return value
;;
st8 [in0]=r11,8
adds sp=48,sp // pop stack frame
mov ar.pfs=r15
br.ret.sptk.few rp
#endif
.endp ia64_sal_stub
#endif /* __GCC_MULTIREG_RETVALS__ */
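The stub exists because SAL hands results back in r8-r11 while this generation of gcc returns any aggregate larger than a doubleword through a caller-supplied pointer. A conceptual sketch of the 32-byte return value the stub copies out of the registers (the struct name here is illustrative; the kernel's own definition lives in its SAL header):

/* Conceptual only -- not the kernel's actual declaration. */
struct sal_retval {
        long status;            /* returned in r8 */
        unsigned long v0;       /* returned in r9 */
        unsigned long v1;       /* returned in r10 */
        unsigned long v2;       /* returned in r11 */
};

/* A conforming compiler would return this aggregate directly in r8-r11;
 * gcc instead expects the callee to fill in a hidden result pointer,
 * which is exactly the st8 [in0]=r8,8; st8 [in0]=r9,8; ... sequence
 * the stub performs after the br.call into SAL returns.
 */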
...@@ -3,8 +3,12 @@ ...@@ -3,8 +3,12 @@
* *
* Copyright (C) 1998-2000 Hewlett-Packard Co * Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*
* 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
*/ */
#define FPSWA_DEBUG 1
/* /*
* The fpu_fault() handler needs to be able to access and update all * The fpu_fault() handler needs to be able to access and update all
* floating point registers. Those saved in pt_regs can be accessed * floating point registers. Those saved in pt_regs can be accessed
...@@ -168,7 +172,7 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs) ...@@ -168,7 +172,7 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo.si_signo = sig; siginfo.si_signo = sig;
siginfo.si_errno = 0; siginfo.si_errno = 0;
siginfo.si_code = code; siginfo.si_code = code;
send_sig_info(sig, &siginfo, current); force_sig_info(sig, &siginfo, current);
} }
/* /*
...@@ -300,6 +304,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -300,6 +304,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
if (copy_from_user(bundle, (void *) fault_ip, sizeof(bundle))) if (copy_from_user(bundle, (void *) fault_ip, sizeof(bundle)))
return -1; return -1;
#ifdef FPSWA_DEBUG
if (fpu_swa_count > 5 && jiffies - last_time > 5*HZ) if (fpu_swa_count > 5 && jiffies - last_time > 5*HZ)
fpu_swa_count = 0; fpu_swa_count = 0;
if (++fpu_swa_count < 5) { if (++fpu_swa_count < 5) {
...@@ -307,7 +312,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -307,7 +312,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
printk("%s(%d): floating-point assist fault at ip %016lx\n", printk("%s(%d): floating-point assist fault at ip %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri); current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri);
} }
#endif
exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
&regs->cr_ifs, regs); &regs->cr_ifs, regs);
if (fp_fault) { if (fp_fault) {
...@@ -331,7 +336,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -331,7 +336,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
} else if (isr & 0x44) { } else if (isr & 0x44) {
siginfo.si_code = FPE_FLTDIV; siginfo.si_code = FPE_FLTDIV;
} }
send_sig_info(SIGFPE, &siginfo, current); siginfo.si_isr = isr;
force_sig_info(SIGFPE, &siginfo, current);
} }
} else { } else {
if (exception == -1) { if (exception == -1) {
...@@ -350,12 +356,49 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -350,12 +356,49 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
} else if (isr & 0x2200) { } else if (isr & 0x2200) {
siginfo.si_code = FPE_FLTRES; siginfo.si_code = FPE_FLTRES;
} }
send_sig_info(SIGFPE, &siginfo, current); siginfo.si_isr = isr;
force_sig_info(SIGFPE, &siginfo, current);
} }
} }
return 0; return 0;
} }
struct illegal_op_return {
unsigned long fkt, arg1, arg2, arg3;
};
struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5,
unsigned long arg6, unsigned long arg7, unsigned long stack)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
struct illegal_op_return rv;
struct siginfo si;
char buf[128];
#ifdef CONFIG_IA64_BRL_EMU
{
extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);
rv = ia64_emulate_brl(regs, ec);
if (rv.fkt != (unsigned long) -1)
return rv;
}
#endif
sprintf(buf, "IA-64 Illegal operation fault");
die_if_kernel(buf, regs, 0);
memset(&si, 0, sizeof(si));
si.si_signo = SIGILL;
si.si_code = ILL_ILLOPC;
si.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
force_sig_info(SIGILL, &si, current);
rv.fkt = 0;
return rv;
}
void void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long iim, unsigned long itir, unsigned long arg5, unsigned long iim, unsigned long itir, unsigned long arg5,
...@@ -450,11 +493,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -450,11 +493,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
force_sig_info(SIGTRAP, &siginfo, current); force_sig_info(SIGTRAP, &siginfo, current);
return; return;
case 30: /* Unaligned fault */
sprintf(buf, "Kernel unaligned trap accessing %016lx (ip=%016lx)!",
ifa, regs->cr_iip + ia64_psr(regs)->ri);
break;
case 32: /* fp fault */ case 32: /* fp fault */
case 33: /* fp trap */ case 33: /* fp trap */
result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr); result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
......
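Because the hunk above now raises SIGFPE via force_sig_info() with a populated siginfo (including the new si_isr field), a user process can inspect the fault details from an SA_SIGINFO handler. A minimal user-space sketch using only the generic siginfo fields (si_isr itself is IA-64 specific and needs the architecture's siginfo definition):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void fpe_handler (int sig, siginfo_t *si, void *ucontext)
{
        /* si->si_code is FPE_FLTINV, FPE_FLTDIV, FPE_FLTRES, ... as set above */
        static const char msg[] = "caught SIGFPE\n";
        write(2, msg, sizeof(msg) - 1);
        _exit(1);
}

int main (void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = fpe_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGFPE, &sa, NULL);

        /* provoke an IEEE exception here to exercise the handler */
        return 0;
}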
...@@ -44,4 +44,4 @@ print_offsets.s: print_offsets.c ...@@ -44,4 +44,4 @@ print_offsets.s: print_offsets.c
endif endif
.PHONY: all .PHONY: all modules
/* $Id: ioport.c,v 1.38 2000/06/04 06:23:52 anton Exp $ /* $Id: ioport.c,v 1.39 2000/06/20 01:10:00 anton Exp $
* ioport.c: Simple io mapping allocator. * ioport.c: Simple io mapping allocator.
* *
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
...@@ -43,6 +43,8 @@ ...@@ -43,6 +43,8 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
struct resource *_sparc_find_resource(struct resource *r, unsigned long); struct resource *_sparc_find_resource(struct resource *r, unsigned long);
static void *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); static void *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
......