Commit 4494ecd0 authored by Linus Torvalds

Merge

parents b862aa5d be8cec9c
...@@ -211,7 +211,7 @@ config MCKINLEY_A0_SPECIFIC
config NUMA
bool "Enable NUMA support" if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
-default y if IA64_SGI_SN2
+default y if IA64_SGI_SN2 || IA64_GENERIC
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
...@@ -234,9 +234,8 @@ config IA64_NODESIZE_256GB
endchoice
config DISCONTIGMEM
-bool
-depends on IA64_SGI_SN2 || (IA64_GENERIC || IA64_DIG || IA64_HP_ZX1) && NUMA
-default y
+bool "Discontiguous memory support" if (IA64_DIG && NUMA)
+default y if IA64_SGI_SN2 || IA64_GENERIC
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
...@@ -245,8 +244,7 @@ config DISCONTIGMEM
config VIRTUAL_MEM_MAP
bool "Enable Virtual Mem Map"
-depends on !NUMA
-default y if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
+default y if !IA64_HP_SIM
help
Say Y to compile the kernel with support for a virtual mem map.
This is an alternate method of supporting large holes in the
...@@ -259,8 +257,8 @@ config VIRTUAL_MEM_MAP
are unsure, say Y.
config IA64_MCA
-bool "Enable IA-64 Machine Check Abort" if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
-default y if IA64_SGI_SN2
+bool "Enable IA-64 Machine Check Abort"
+default y if !IA64_HP_SIM
help
Say Y here to enable machine check support for IA-64. If you're
unsure, answer Y.
...@@ -292,14 +290,6 @@ config IOSAPIC
depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_SGI_SN2
default y
config IA64_SGI_SN_DEBUG
bool "Enable extra debugging code"
depends on IA64_SGI_SN2
help
Turns on extra debugging code in the SGI SN (Scalable NUMA) platform
for IA-64. Unless you are debugging problems on an SGI SN IA-64 box,
say N.
config IA64_SGI_SN_SIM
bool "Enable SGI Medusa Simulator Support"
depends on IA64_SGI_SN2
...@@ -307,29 +297,6 @@ config IA64_SGI_SN_SIM
If you are compiling a kernel that will run under SGI's IA-64
simulator (Medusa) then say Y, otherwise say N.
config IA64_SGI_AUTOTEST
bool "Enable autotest (llsc). Option to run cache test instead of booting"
depends on IA64_SGI_SN2
help
Build a kernel used for hardware validation. If you include the
keyword "autotest" on the boot command line, the kernel does NOT boot.
Instead, it starts all cpus and runs cache coherency tests instead.
If unsure, say N.
config SERIAL_SGI_L1_PROTOCOL
bool "Enable protocol mode for the L1 console"
depends on IA64_SGI_SN2
help
Uses protocol mode instead of raw mode for the level 1 console on the
SGI SN (Scalable NUMA) platform for IA-64. If you are compiling for
an SGI SN box then Y is the recommended value, otherwise say N.
config PERCPU_IRQ
bool
depends on IA64_SGI_SN2
default y
# On IA-64, we always want an ELF /proc/kcore.
config KCORE_ELF
bool
......
...@@ -4,3 +4,5 @@
obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
+CFLAGS_ia32_ioctl.o += -Ifs/
...@@ -19,6 +19,7 @@
#include <asm/signal.h>
#include "ia32priv.h"
+#include "elfcore32.h"
#define CONFIG_BINFMT_ELF32
......
/*
* IA-32 ELF core dump support.
*
* Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com>
*
* Derived from the x86_64 version
*/
#ifndef _ELFCORE32_H_
#define _ELFCORE32_H_
#define USE_ELF_CORE_DUMP 1
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct ia32_user_i387_struct elf_fpregset_t;
typedef struct ia32_user_fxsr_struct elf_fpxregset_t;
struct elf_siginfo
{
int si_signo; /* signal number */
int si_code; /* extra code */
int si_errno; /* errno */
};
#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
struct elf_prstatus
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define ELF_PRARGSZ (80) /* Number of chars for args */
struct elf_prpsinfo
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__u16 pr_uid;
__u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
pr_reg[0] = regs->r11; \
pr_reg[1] = regs->r9; \
pr_reg[2] = regs->r10; \
pr_reg[3] = regs->r14; \
pr_reg[4] = regs->r15; \
pr_reg[5] = regs->r13; \
pr_reg[6] = regs->r8; \
pr_reg[7] = regs->r16 & 0xffff; \
pr_reg[8] = (regs->r16 >> 16) & 0xffff; \
pr_reg[9] = (regs->r16 >> 32) & 0xffff; \
pr_reg[10] = (regs->r16 >> 48) & 0xffff; \
pr_reg[11] = regs->r1; \
pr_reg[12] = regs->cr_iip; \
pr_reg[13] = regs->r17 & 0xffff; \
asm volatile ("mov %0=ar.eflag ;;" \
: "=r"(pr_reg[14])); \
pr_reg[15] = regs->r12; \
pr_reg[16] = (regs->r17 >> 16) & 0xffff;
static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
struct pt_regs *regs)
{
ELF_CORE_COPY_REGS((*elfregs), regs)
}
static inline int elf_core_copy_task_regs(struct task_struct *t,
elf_gregset_t* elfregs)
{
struct pt_regs *pp = ia64_task_regs(t);
ELF_CORE_COPY_REGS((*elfregs), pp);
return 1;
}
static inline int
elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
{
struct ia32_user_i387_struct *fpstate = (void*)fpu;
if (!tsk->used_math)
return 0;
save_ia32_fpstate(tsk, fpstate);
return 1;
}
#define ELF_CORE_COPY_XFPREGS 1
static inline int
elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu)
{
struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu;
if (!tsk->used_math)
return 0;
save_ia32_fpxstate(tsk, fpxstate);
return 1;
}
#endif /* _ELFCORE32_H_ */
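A reader's note, not part of the commit: the ELF_CORE_COPY_REGS macro above fills pr_reg[] in the standard i386 core-file register order (EBX in slot 0 through SS in slot 16, giving the 17 slots that match ELF_NGREG = sizeof(struct user_regs_struct32)/sizeof(elf_greg_t)), pulling each value from wherever the IA-32 emulation layer keeps it in the ia64 pt_regs. A minimal standalone sketch of that slot layout; the enum names are assumptions taken from the i386 ABI convention, not identifiers from this diff:

	#include <stdio.h>

	/* i386 core-dump register slot order (i386 ABI assumption) */
	enum i386_greg {
		GR_EBX, GR_ECX, GR_EDX, GR_ESI, GR_EDI, GR_EBP, GR_EAX,
		GR_DS, GR_ES, GR_FS, GR_GS, GR_ORIG_EAX,
		GR_EIP, GR_CS, GR_EFLAGS, GR_ESP, GR_SS
	};

	int main(void)
	{
		/* pr_reg[0] above is regs->r11 because slot 0 is EBX, and
		 * pr_reg[12] is regs->cr_iip because slot 12 is EIP;
		 * GR_SS + 1 == 17 == ELF_NGREG. */
		printf("EBX=%d EIP=%d NGREG=%d\n", GR_EBX, GR_EIP, GR_SS + 1);
		return 0;
	}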
...@@ -440,9 +440,9 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_ni_syscall
-data8 sys_ni_syscall
-data8 sys_ni_syscall /*255*/
-data8 sys_ni_syscall
+data8 sys_epoll_create
+data8 sys32_epoll_ctl /* 255 */
+data8 sys32_epoll_wait
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_ni_syscall
......
...@@ -295,7 +295,6 @@ struct old_linux32_dirent {
#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE)
#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE
/*
...@@ -312,20 +311,6 @@ void ia64_elf32_init(struct pt_regs *regs);
#define elf_addr_t u32
/* ELF register definitions. This is needed for core dump support. */
#define ELF_NGREG 128 /* XXX fix me */
#define ELF_NFPREG 128 /* XXX fix me */
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
unsigned long w0;
unsigned long w1;
} elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/* This macro yields a bitmask that programs can use to figure out
what instruction set this CPU supports. */
#define ELF_HWCAP 0
...@@ -472,6 +457,23 @@ extern void ia32_load_segment_descriptors (struct task_struct *task);
asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
} while(0)
struct user_regs_struct32 {
__u32 ebx, ecx, edx, esi, edi, ebp, eax;
unsigned short ds, __ds, es, __es;
unsigned short fs, __fs, gs, __gs;
__u32 orig_eax, eip;
unsigned short cs, __cs;
__u32 eflags, esp;
unsigned short ss, __ss;
};
/* Prototypes for use in elfcore32.h */
int save_ia32_fpstate (struct task_struct *tsk,
struct ia32_user_i387_struct *save);
int save_ia32_fpxstate (struct task_struct *tsk,
struct ia32_user_fxsr_struct *save);
#endif /* !CONFIG_IA32_SUPPORT */
#endif /* _ASM_IA64_IA32_H */
...@@ -43,6 +43,7 @@
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
+#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
...@@ -1866,7 +1867,7 @@ get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
return;
}
-static int
+int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
{
struct switch_stack *swp;
...@@ -1928,7 +1929,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *sav
return 0;
}
-static int
+int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
{
struct switch_stack *swp;
...@@ -2704,6 +2705,95 @@ sys32_open (const char * filename, int flags, int mode)
goto out;
}
/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
u32 events;
u64 data;
} __attribute__((packed));
asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
{
mm_segment_t old_fs = get_fs();
struct epoll_event event64;
int error = -EFAULT;
u32 data_halfword;
if ((error = verify_area(VERIFY_READ, event,
sizeof(struct epoll_event32))))
return error;
__get_user(event64.events, &event->events);
__get_user(data_halfword, (u32*)(&event->data));
event64.data = data_halfword;
__get_user(data_halfword, ((u32*)(&event->data) + 1));
event64.data |= ((u64)data_halfword) << 32;
set_fs(KERNEL_DS);
error = sys_epoll_ctl(epfd, op, fd, &event64);
set_fs(old_fs);
return error;
}
asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
int timeout)
{
struct epoll_event *events64 = NULL;
mm_segment_t old_fs = get_fs();
int error;
int evt_idx;
if (maxevents <= 0) {
return -EINVAL;
}
/* Verify that the area passed by the user is writeable */
if ((error = verify_area(VERIFY_WRITE, events,
maxevents * sizeof(struct epoll_event32))))
return error;
/* Allocate the space needed for the intermediate copy */
events64 = kmalloc(maxevents * sizeof(struct epoll_event), GFP_KERNEL);
if (events64 == NULL) {
return -ENOMEM;
}
/* Expand the 32-bit structures into the 64-bit structures */
for (evt_idx = 0; evt_idx < maxevents; evt_idx++) {
u32 data_halfword;
__get_user(events64[evt_idx].events, &events[evt_idx].events);
__get_user(data_halfword, (u32*)(&events[evt_idx].data));
events64[evt_idx].data = data_halfword;
__get_user(data_halfword, ((u32*)(&events[evt_idx].data) + 1));
events64[evt_idx].data |= ((u64)data_halfword) << 32;
}
/* Do the system call */
set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
error = sys_epoll_wait(epfd, events64, maxevents, timeout);
set_fs(old_fs);
/* Don't modify userspace memory if we're returning an error */
if (!error) {
/* Translate the 64-bit structures back into the 32-bit
structures */
for (evt_idx = 0; evt_idx < maxevents; evt_idx++) {
__put_user(events64[evt_idx].events,
&events[evt_idx].events);
__put_user((u32)(events64[evt_idx].data),
(u32*)(&events[evt_idx].data));
__put_user((u32)(events64[evt_idx].data >> 32),
((u32*)(&events[evt_idx].data) + 1));
}
}
kfree(events64);
return error;
}
#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
struct ncp_mount_data32 {
......
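A reader's illustration, not part of the commit: the sys32_epoll_ctl() and sys32_epoll_wait() wrappers added above (and wired into syscall slots 254-256 in the ia32_entry.S hunk) exist because struct epoll_event is __attribute__((packed)) in the IA-32 ABI but naturally aligned on ia64, so the 64-bit data member sits at a different offset and the structures cannot be copied byte-for-byte; each field must be repacked, which is what the __get_user/__put_user pairs do. This standalone program shows the layout mismatch:

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	struct epoll_event32 {		/* IA-32 layout, as in the commit */
		uint32_t events;
		uint64_t data;
	} __attribute__((packed));

	struct epoll_event64 {		/* natural 64-bit layout */
		uint32_t events;
		uint64_t data;
	};

	int main(void)
	{
		printf("packed ia32 event:  %zu bytes, data at offset %zu\n",
		       sizeof(struct epoll_event32),
		       offsetof(struct epoll_event32, data));
		printf("natural ia64 event: %zu bytes, data at offset %zu\n",
		       sizeof(struct epoll_event64),
		       offsetof(struct epoll_event64, data));
		return 0;
	}

On typical systems this prints 12 bytes/offset 4 for the packed form against 16 bytes/offset 8 for the natural one, so a raw copy would misplace every entry after the first.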
...@@ -51,13 +51,6 @@
#define PREFIX "ACPI: "
asm (".weak iosapic_register_intr");
asm (".weak iosapic_override_isa_irq");
asm (".weak iosapic_register_platform_intr");
asm (".weak iosapic_init");
asm (".weak iosapic_system_init");
asm (".weak iosapic_version");
void (*pm_idle) (void);
void (*pm_power_off) (void);
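A reader's sketch, not part of the commit: the six deleted asm(".weak ...") lines made the IOSAPIC entry points weak references, so acpi.c could link on configurations with no IOSAPIC and test each symbol for NULL before calling it; the following hunks remove those run-time checks in favor of direct calls. The same pattern in standalone C, using GCC's attribute form rather than the kernel's asm directive (assumed equivalent here for illustration; the function name is invented):

	#include <stdio.h>

	/* weak declaration: address is NULL if no object file defines it */
	void optional_init(int arg) __attribute__((weak));

	int main(void)
	{
		if (optional_init)	/* non-NULL only if some TU defines it */
			optional_init(42);
		else
			printf("optional_init not linked in, skipping\n");
		return 0;
	}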
...@@ -241,8 +234,7 @@ acpi_parse_iosapic (acpi_table_entry_header *header)
acpi_table_print_madt_entry(header);
-if (iosapic_init)
-iosapic_init(iosapic->address, iosapic->global_irq_base);
+iosapic_init(iosapic->address, iosapic->global_irq_base);
return 0;
}
...@@ -260,11 +252,6 @@ acpi_parse_plat_int_src (acpi_table_entry_header *header)
acpi_table_print_madt_entry(header);
if (!iosapic_register_platform_intr) {
printk(KERN_WARNING PREFIX "No ACPI platform interrupt support\n");
return -ENODEV;
}
/*
* Get vector assignment for this interrupt, set attributes,
* and program the IOSAPIC routing table.
...@@ -293,10 +280,6 @@ acpi_parse_int_src_ovr (acpi_table_entry_header *header)
acpi_table_print_madt_entry(header);
/* Ignore if the platform doesn't support overrides */
if (!iosapic_override_isa_irq)
return 0;
iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
...@@ -334,8 +317,7 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
#else
has_8259 = acpi_madt->flags.pcat_compat;
#endif
-if (iosapic_system_init)
-iosapic_system_init(has_8259);
+iosapic_system_init(has_8259);
/* Get base address of IPI Message Block */
...@@ -535,7 +517,6 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
struct acpi_table_header *fadt_header;
struct fadt_descriptor_rev2 *fadt;
u32 sci_irq;
if (!phys_addr || !size)
return -EINVAL;
...@@ -549,15 +530,7 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
acpi_kbd_controller_present = 0;
-if (!iosapic_register_intr)
-return 0; /* just ignore the rest */
-sci_irq = fadt->sci_int;
-if (has_8259 && sci_irq < 16)
-return 0; /* legacy, no setup required */
-iosapic_register_intr(sci_irq, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
+acpi_register_irq(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
return 0;
}
...@@ -707,28 +680,23 @@ acpi_get_interrupt_model (int *type)
}
int
-acpi_irq_to_vector (u32 irq)
+acpi_irq_to_vector (u32 gsi)
{
-if (has_8259 && irq < 16)
-return isa_irq_to_vector(irq);
-return gsi_to_vector(irq);
+if (has_8259 && gsi < 16)
+return isa_irq_to_vector(gsi);
+return gsi_to_vector(gsi);
}
int
acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
{
-int vector = 0;
-if (has_8259 && (gsi < 16))
+if (has_8259 && gsi < 16)
return isa_irq_to_vector(gsi);
-if (!iosapic_register_intr)
-return 0;
-/* Turn it on */
-vector = iosapic_register_intr (gsi, polarity, trigger);
-return vector;
+return iosapic_register_intr(gsi,
+(polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+(trigger == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
}
#endif /* CONFIG_ACPI_BOOT */
...@@ -543,7 +543,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
delivery = IOSAPIC_INIT;
break;
case ACPI_INTERRUPT_CPEI:
-vector = IA64_PCE_VECTOR;
+vector = IA64_CPE_VECTOR;
delivery = IOSAPIC_LOWEST_PRIORITY;
break;
default:
......
...@@ -70,14 +70,14 @@ GLOBAL_ENTRY(xor_ia64_3)
adds in0 = -1, in0
mov r16 = in1
mov r17 = in2
;;
mov r18 = in3
mov ar.lc = in0
mov pr.rot = 1 << 16
;;
.rotr s1[6+1], s2[6+1], s3[6+1], d[2]
.rotp p[6+2]
0:
(p[0]) ld8.nta s1[0] = [r16], 8
(p[0]) ld8.nta s2[0] = [r17], 8
(p[6]) xor d[0] = s1[6], s2[6]
...@@ -162,7 +162,7 @@ GLOBAL_ENTRY(xor_ia64_5)
;;
.rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], s5[6+1], d[2]
.rotp p[6+2]
0:
(p[0]) ld8.nta s1[0] = [r16], 8
(p[0]) ld8.nta s2[0] = [r17], 8
(p[6]) xor d[0] = s1[6], s2[6]
......
...@@ -758,28 +758,6 @@ hwgraph_info_unexport_LBL(vertex_hdl_t de, char *name)
return(rc);
}
/*
* hwgraph_path_lookup - return the handle for the given path.
*
*/
int
hwgraph_path_lookup(vertex_hdl_t start_vertex_handle,
char *lookup_path,
vertex_hdl_t *vertex_handle_ptr,
char **remainder)
{
*vertex_handle_ptr = hwgfs_find_handle(start_vertex_handle, /* start dir */
lookup_path, /* path */
0, /* major */
0, /* minor */
0, /* char | block */
1); /* traverse symlinks */
if (*vertex_handle_ptr == NULL)
return(-1);
else
return(0);
}
/*
* hwgraph_traverse - Find and return the handle starting from de.
*
...@@ -920,6 +898,5 @@ EXPORT_SYMBOL(hwgraph_info_get_exported_LBL);
EXPORT_SYMBOL(hwgraph_info_get_next_LBL);
EXPORT_SYMBOL(hwgraph_info_export_LBL);
EXPORT_SYMBOL(hwgraph_info_unexport_LBL);
EXPORT_SYMBOL(hwgraph_path_lookup);
EXPORT_SYMBOL(hwgraph_traverse);
EXPORT_SYMBOL(hwgraph_vertex_name_get);
...@@ -40,15 +40,12 @@
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <asm/sn/hwgfs.h>
extern struct vfsmount *hwgfs_vfsmount;
/* TODO: Move this to some .h file or, more likely, use a slightly
different interface from lookup_create. */
extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
static int
walk_parents_mkdir(
const char **path,
...@@ -69,6 +66,7 @@ walk_parents_mkdir(
return error;
nd->dentry = lookup_create(nd, is_dir);
nd->flags |= LOOKUP_PARENT;
if (unlikely(IS_ERR(nd->dentry)))
return PTR_ERR(nd->dentry);
......
...@@ -87,11 +87,7 @@ hub_pio_init(vertex_hdl_t hubv)
hub_set_piomode(nasid, HUB_PIO_CONVEYOR);
mutex_spinlock_init(&hubinfo->h_bwlock);
-/*
-* If this lock can be acquired from interrupts or bh's, add SV_INTS or SV_BHS,
-* respectively, to the flags here.
-*/
-sv_init(&hubinfo->h_bwwait, &hubinfo->h_bwlock, SV_ORDER_FIFO | SV_MON_SPIN);
+init_waitqueue_head(&hubinfo->h_bwwait);
}
/*
...@@ -215,10 +211,16 @@ hub_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
if (flags & PIOMAP_NOSLEEP) {
bw_piomap = NULL;
goto done;
+} else {
+DECLARE_WAITQUEUE(wait, current);
+spin_unlock(&hubinfo->h_bwlock);
+set_current_state(TASK_UNINTERRUPTIBLE);
+add_wait_queue_exclusive(&hubinfo->h_bwwait, &wait);
+schedule();
+remove_wait_queue(&hubinfo->h_bwwait, &wait);
+goto tryagain;
}
-sv_wait(&hubinfo->h_bwwait, 0, 0);
-goto tryagain;
}
}
...@@ -316,7 +318,7 @@ hub_piomap_free(hub_piomap_t hub_piomap)
} else
hub_piomap->hpio_flags &= ~HUB_PIOMAP_IS_VALID;
-(void)sv_signal(&hubinfo->h_bwwait);
+wake_up(&hubinfo->h_bwwait);
}
mutex_spinunlock(&hubinfo->h_bwlock, s);
......
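A rough analogue, not from the commit: the two hunks above swap the IRIX-derived synchronization variable (sv_init/sv_wait/sv_signal) for a plain Linux wait queue, with the allocator open-coding the classic sleep sequence (set TASK_UNINTERRUPTIBLE, add to the queue, schedule(), remove, retry via "goto tryagain") and the free path calling wake_up(). The same sleep/wake shape in userspace, with POSIX primitives standing in for the kernel ones and all names invented for illustration:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;   /* h_bwlock */
	static pthread_cond_t  bwwait = PTHREAD_COND_INITIALIZER;  /* h_bwwait */
	static int free_maps;

	static void piomap_alloc(void)
	{
		pthread_mutex_lock(&lock);
		while (free_maps == 0)			/* the "tryagain" loop */
			pthread_cond_wait(&bwwait, &lock); /* sleep; lock dropped */
		free_maps--;
		pthread_mutex_unlock(&lock);
	}

	static void piomap_free(void)
	{
		pthread_mutex_lock(&lock);
		free_maps++;
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&bwwait);		/* wake_up(&h_bwwait) */
	}

	int main(void)
	{
		piomap_free();		/* make one mapping available */
		piomap_alloc();		/* consumes it without blocking */
		printf("allocated; %d left\n", free_maps);
		return 0;
	}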
...@@ -73,3 +73,4 @@ sn_mmiob (void)
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
udelay(1);
}
EXPORT_SYMBOL(sn_mmiob);
...@@ -30,17 +30,13 @@
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/bridge.h>
-#ifdef DEBUG_CONFIG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
+/*
+* These routines are only used during sn_pci_init for probing each bus, and
+* can probably be removed with a little more cleanup now that the SAL routines
+* work on sn2.
+*/
#ifdef CONFIG_PCI
extern vertex_hdl_t pci_bus_to_vertex(unsigned char);
extern vertex_hdl_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
int sn_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
...@@ -49,10 +45,12 @@ int sn_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
vertex_hdl_t device_vertex;
device_vertex = devfn_to_vertex(bus->number, devfn);
if (!device_vertex)
return PCIBIOS_DEVICE_NOT_FOUND;
-res = pciio_config_get(device_vertex, (unsigned) where, size);
-*val = (unsigned int) res;
+res = pciio_config_get(device_vertex, (unsigned)where, size);
+*val = (u32)res;
return PCIBIOS_SUCCESSFUL;
}
...@@ -61,79 +59,21 @@ int sn_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size
vertex_hdl_t device_vertex;
device_vertex = devfn_to_vertex(bus->number, devfn);
if (!device_vertex)
return PCIBIOS_DEVICE_NOT_FOUND;
-pciio_config_set( device_vertex, (unsigned)where, size, (uint64_t) val);
+pciio_config_set(device_vertex, (unsigned)where, size, (uint64_t)val);
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops sn_pci_ops = {
.read = sn_read_config,
-.write = sn_write_config
+.write = sn_write_config,
};
/*
* sn_pci_find_bios - SNIA64 pci_find_bios() platform specific code.
*/
void __init
sn_pci_find_bios(void)
{
extern struct pci_ops *pci_root_ops;
/*
* Go initialize our IO Infrastructure ..
*/
extern void sgi_master_io_infr_init(void);
sgi_master_io_infr_init();
/* sn_io_infrastructure_init(); */
pci_root_ops = &sn_pci_ops;
}
void
pci_fixup_ioc3(struct pci_dev *d)
{
int i;
unsigned int size;
/* IOC3 only decodes 0x20 bytes of the config space, reading
* beyond that is relatively benign but writing beyond that
* (especially the base address registers) will shut down the
* pci bus...so avoid doing so.
* NOTE: this means we can't program the intr_pin into the device,
* currently we hack this with special code in
* sgi_pci_intr_support()
*/
DBG("pci_fixup_ioc3: Fixing base addresses for ioc3 device %s\n", pci_name(d));
/* I happen to know from the spec that the ioc3 needs only 0xfffff
* The standard pci trick of writing ~0 to the baddr and seeing
* what comes back doesn't work with the ioc3
*/
size = 0xfffff;
d->resource[0].end = (unsigned long) d->resource[0].start + (unsigned long) size;
/*
* Zero out the resource structure .. because we did not go through
* the normal PCI Infrastructure Init, garbbage are left in these
* fileds.
*/
for (i = 1; i <= PCI_ROM_RESOURCE; i++) {
d->resource[i].start = 0UL;
d->resource[i].end = 0UL;
d->resource[i].flags = 0UL;
}
d->subsystem_vendor = 0;
d->subsystem_device = 0;
}
#else
void sn_pci_find_bios(void) {}
void pci_fixup_ioc3(struct pci_dev *d) {}
struct list_head pci_root_buses;
struct list_head pci_root_buses;
struct list_head pci_devices;
#endif /* CONFIG_PCI */
...@@ -411,34 +411,6 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
}
}
/*
* Most drivers currently do not properly tell the arch specific pci dma
* interfaces whether they can handle A64. Here is where we privately
* keep track of this.
*/
static void __init
set_sn_pci64(struct pci_dev *dev)
{
unsigned short vendor = dev->vendor;
unsigned short device = dev->device;
if (vendor == PCI_VENDOR_ID_QLOGIC) {
if ((device == PCI_DEVICE_ID_QLOGIC_ISP2100) ||
(device == PCI_DEVICE_ID_QLOGIC_ISP2200)) {
SET_PCIA64(dev);
return;
}
}
if (vendor == PCI_VENDOR_ID_SGI) {
if (device == PCI_DEVICE_ID_SGI_IOC3) {
SET_PCIA64(dev);
return;
}
}
}
/*
* sn_pci_fixup() - This routine is called when platform_pci_fixup() is
* invoked at the end of pcibios_init() to link the Linux pci
...@@ -455,10 +427,9 @@ sn_pci_fixup(int arg)
struct sn_widget_sysdata *widget_sysdata;
struct sn_device_sysdata *device_sysdata;
pciio_intr_t intr_handle;
-int cpuid, bit;
+int cpuid;
vertex_hdl_t device_vertex;
pciio_intr_line_t lines;
extern void sn_pci_find_bios(void);
extern int numnodes;
int cnode;
...@@ -466,8 +437,11 @@ sn_pci_fixup(int arg)
#ifdef CONFIG_PROC_FS
extern void register_sn_procfs(void);
#endif
+extern void irix_io_init(void);
-sn_pci_find_bios();
+init_hcl();
+irix_io_init();
for (cnode = 0; cnode < numnodes; cnode++) {
extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
intr_init_vecblk(NODEPDA(cnode), cnode, 0);
...@@ -512,32 +486,25 @@ sn_pci_fixup(int arg)
unsigned int irq;
int idx;
u16 cmd;
vertex_hdl_t vhdl;
unsigned long size;
extern int bit_pos_to_irq(int);
if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
extern void pci_fixup_ioc3(struct pci_dev *d);
pci_fixup_ioc3(device_dev);
}
/* Set the device vertex */
device_sysdata = kmalloc(sizeof(struct sn_device_sysdata),
GFP_KERNEL);
device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
device_sysdata->isa64 = 0;
+device_vertex = device_sysdata->vhdl;
-/*
-* Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
-* register addresses.
-*/
-(void) set_flush_addresses(device_dev, device_sysdata);
device_dev->sysdata = (void *) device_sysdata;
-set_sn_pci64(device_dev);
set_isPIC(device_sysdata);
/*
* Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
* register addresses.
*/
set_flush_addresses(device_dev, device_sysdata);
pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
/*
...@@ -546,13 +513,12 @@ sn_pci_fixup(int arg)
* read from the card and it was set in the card by our
* Infrastructure ..
*/
vhdl = device_sysdata->vhdl;
for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
size = 0;
size = device_dev->resource[idx].end -
device_dev->resource[idx].start;
if (size) {
-device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
+device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(device_vertex, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
}
else
...@@ -567,28 +533,6 @@ sn_pci_fixup(int arg)
if (device_dev->resource[idx].flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
#if 0
/*
* Software WAR for a Software BUG.
* This is only temporary.
* See PV 872791
*/
/*
* Now handle the ROM resource ..
*/
size = device_dev->resource[PCI_ROM_RESOURCE].end -
device_dev->resource[PCI_ROM_RESOURCE].start;
if (size) {
device_dev->resource[PCI_ROM_RESOURCE].start =
(unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
device_dev->resource[PCI_ROM_RESOURCE].end =
device_dev->resource[PCI_ROM_RESOURCE].start + size;
}
#endif
/*
* Update the Command Word on the Card.
...@@ -596,16 +540,10 @@ sn_pci_fixup(int arg)
cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
/* bit gets dropped .. no harm */
pci_write_config_word(device_dev, PCI_COMMAND, cmd);
-pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
-if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
-device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
-lines = 1;
-}
-device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
-device_vertex = device_sysdata->vhdl;
+pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN,
+(unsigned char *)&lines);
irqpdaindr->current = device_dev;
intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
...@@ -622,7 +560,8 @@ sn_pci_fixup(int arg)
size = device_dev->resource[idx].end -
device_dev->resource[idx].start;
-if (size == 0) continue;
+if (size == 0)
+continue;
for (i=0; i<8; i++) {
if (ibits & (1 << i) ) {
...@@ -636,22 +575,6 @@ sn_pci_fixup(int arg)
}
}
#ifdef ajmtestintr
{
int slot = PCI_SLOT(device_dev->devfn);
static int timer_set = 0;
pcibr_intr_t pcibr_intr = (pcibr_intr_t)intr_handle;
pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
extern void intr_test_handle_intr(int, void*, struct pt_regs *);
if (!timer_set) {
intr_test_set_timer();
timer_set = 1;
}
intr_test_register_irq(irq, pcibr_soft, slot);
request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
}
#endif
}
/*
...@@ -928,3 +851,37 @@ pci_bus_to_hcl_cvlink(void)
return(0);
}
/*
* Ugly hack to get PCI setup until we have a proper ACPI namespace.
*/
extern struct pci_ops sn_pci_ops;
int __init
sn_pci_init (void)
{
# define PCI_BUSES_TO_SCAN 256
int i = 0;
struct pci_controller *controller;
/*
* set pci_raw_ops, etc.
*/
sn_pci_fixup(0);
controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
if (controller) {
memset(controller, 0, sizeof(struct pci_controller));
/* just allocate some devices and fill in the pci_dev structs */
for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
pci_scan_bus(i, &sn_pci_ops, controller);
}
/*
* actually find devices and fill in hwgraph structs
*/
sn_pci_fixup(1);
return 0;
}
subsys_initcall(sn_pci_init);
...@@ -9,15 +9,13 @@
#include <linux/types.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/pda.h>
#include <linux/smp.h>
extern int init_hcl(void);
/*
* per_hub_init
...@@ -80,30 +78,3 @@ per_hub_init(cnodeid_t cnode)
/* Initialize error interrupts for this hub. */
hub_error_init(cnode);
}
/*
* This routine is responsible for the setup of all the IRIX hwgraph style
* stuff that's been pulled into linux. It's called by sn_pci_find_bios which
* is called just before the generic Linux PCI layer does its probing (by
* platform_pci_fixup aka sn_pci_fixup).
*
* It is very IMPORTANT that this call is only made by the Master CPU!
*
*/
void
sgi_master_io_infr_init(void)
{
extern void irix_io_init(void);
init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
irix_io_init(); /* Do IRIX Compatibility IO Init */
#ifdef CONFIG_KDB
{
extern void kdba_io_init(void);
kdba_io_init();
}
#endif
}
...@@ -474,11 +474,6 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
*/
if (bad) {
pcibr_unlock(pcibr_soft, s);
#ifdef PIC_LATER
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
"pcibr_try_set_device: mod blocked by %x\n",
bad, device_bits));
#endif
return bad;
}
}
...@@ -519,13 +514,7 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
}
pcibr_unlock(pcibr_soft, s);
-#ifdef PIC_LATER
-PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
-"pcibr_try_set_device: Device(%d): %x\n",
-slot, new, device_bits));
-#else
printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
-#endif
return 0;
}
...@@ -824,14 +813,7 @@ pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
#ifdef PIC_LATER
/* This may be a loadable driver so lock out any pciconfig actions */
mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
#endif
pcibr_info->f_att_det_error = error;
pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
if (error) {
...@@ -839,11 +821,6 @@ pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
} else {
pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
}
#ifdef PIC_LATER
/* Release the bus lock */
mrunlock(pcibr_soft->bs_bus_lock);
#endif
}
/*
...@@ -875,14 +852,7 @@ pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
#ifdef PIC_LATER
/* This may be a loadable driver so lock out any pciconfig actions */
mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
#endif
pcibr_info->f_att_det_error = error;
pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
if (error) {
...@@ -890,11 +860,6 @@ pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
} else {
pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
}
#ifdef PIC_LATER
/* Release the bus lock */
mrunlock(pcibr_soft->bs_bus_lock);
#endif
}
/*
...@@ -1245,9 +1210,6 @@ pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
* Initialize bridge and bus locks
*/
spin_lock_init(&pcibr_soft->bs_lock);
#ifdef PIC_LATER
mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
#endif
/*
* If we have one, process the hints structure.
*/
...@@ -2250,17 +2212,10 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
* arguments fails so sprintf() it into a temporary string.
*/
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
-#ifdef PIC_LATER
-sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
-"slot %d allocates DevIO(%d) Device(%d) set to %x\n",
-space, space_desc, pci_addr, pci_addr + req_size - 1,
-slot, win, win, devreg, device_bits);
-#else
sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
"slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
(unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
(unsigned int)slot, win, win, (unsigned long)devreg);
-#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
goto done;
...@@ -2291,11 +2246,6 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
pcibr_info->f_window[bar].w_devio_index = win;
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
"slot %d uses DevIO(%d)\n", space, space_desc, pci_addr,
pci_addr + req_size - 1, slot, win);
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
goto done;
...@@ -2392,14 +2342,6 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
;
} else if (bfo != 0) { /* we have a conflict. */
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap conflict in %x, "
"was%s%s, want%s%s\n", space, space_desc,
bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
xio_addr = XIO_NOWHERE;
...@@ -2432,12 +2374,6 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
*bfp = bfn; /* record the assignment */
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap for %x set "
"to%s%s\n", space, space_desc,
bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
}
...@@ -2740,10 +2676,6 @@ pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
size_t req_size)
{
pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
#ifdef PIC_LATER
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
#endif
pciio_piospace_t piosp;
unsigned long s;
char name[1024];
...@@ -3395,10 +3327,6 @@ pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
{
#ifdef PIC_LATER
pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
#endif
/*
* We could go through and invalidate ATEs here;
* for performance reasons, we don't.
...@@ -3719,72 +3647,8 @@ pcibr_provider_shutdown(vertex_hdl_t pcibr)
int
pcibr_reset(vertex_hdl_t conn)
{
#ifdef PIC_LATER
pciio_info_t pciio_info = pciio_info_get(conn);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t ctlreg;
unsigned cfgctl[8];
unsigned long s;
int f, nf;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
int win;
int error = 0;
#endif /* PIC_LATER */
BUG();
-#ifdef PIC_LATER
+return -1;
if (pcibr_soft->bs_slot[pciio_slot].has_host) {
pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
}
if ((pciio_slot >= pcibr_soft->bs_first_slot) &&
(pciio_slot <= pcibr_soft->bs_last_reset)) {
s = pcibr_lock(pcibr_soft);
nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
for (f = 0; f < nf; ++f)
if (pcibr_infoh[f])
cfgctl[f] = pcibr_func_config_get(bridge, pciio_slot, f,
PCI_CFG_COMMAND/4);
error = iobrick_pci_slot_rst(pcibr_soft->bs_l1sc,
pcibr_widget_to_bus(pcibr_soft->bs_vhdl),
PCIBR_DEVICE_TO_SLOT(pcibr_soft,pciio_slot),
NULL);
ctlreg = bridge->b_wid_control;
bridge->b_wid_control = ctlreg & ~BRIDGE_CTRL_RST_PIN(pciio_slot);
nano_delay(&ts);
bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST_PIN(pciio_slot);
nano_delay(&ts);
for (f = 0; f < nf; ++f)
if ((pcibr_info = pcibr_infoh[f]))
for (win = 0; win < 6; ++win)
if (pcibr_info->f_window[win].w_base != 0)
pcibr_func_config_set(bridge, pciio_slot, f,
PCI_CFG_BASE_ADDR(win) / 4,
pcibr_info->f_window[win].w_base);
for (f = 0; f < nf; ++f)
if (pcibr_infoh[f])
pcibr_func_config_set(bridge, pciio_slot, f,
PCI_CFG_COMMAND / 4,
cfgctl[f]);
pcibr_unlock(pcibr_soft, s);
if (error)
return(-1);
return 0;
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, conn,
"pcibr_reset unimplemented for slot %d\n", conn, pciio_slot));
#endif /* PIC_LATER */
return -1;
}
pciio_endian_t
...@@ -3836,13 +3700,7 @@ pcibr_endian_set(vertex_hdl_t pconn_vhdl,
}
pcibr_unlock(pcibr_soft, s);
-#ifdef PIC_LATER
-PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
-"pcibr_endian_set: Device(%d): %x\n",
-pciio_slot, devreg, device_bits));
-#else
printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
-#endif
return desired_end;
}
...@@ -4026,13 +3884,7 @@ pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
}
}
pcibr_unlock(pcibr_soft, s);
-#ifdef PIC_LATER
-PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
-"pcibr_device_flags_set: Device(%d): %x\n",
-pciio_slot, devreg, device_bits));
-#else
printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
-#endif
}
return (1);
}
......
...@@ -199,10 +199,9 @@ shubstats_ioctl(struct inode *inode, struct file *file,
{
cnodeid_t cnode;
uint64_t longarg;
vertex_hdl_t d;
int nasid;
-cnode = (cnodeid_t)hwgraph_fastinfo_get(d);
+cnode = (cnodeid_t)file->f_dentry->d_fsdata;
switch (cmd) {
case SNDRV_SHUB_CONFIGURE:
......
...@@ -52,7 +52,6 @@ void
hub_error_clear(nasid_t nasid)
{
int i;
hubreg_t idsr;
/*
* Make sure spurious write response errors are cleared
......
...@@ -30,40 +30,8 @@
*/
#include <asm/sn/xtalk/xbow.h>
#define DEV_FUNC(dev,func) xbow_##func
#if !defined(DEV_FUNC)
/*
* There is more than one possible provider
* for this platform. We need to examine the
* master vertex of the current vertex for
* a provider function structure, and indirect
* through the appropriately named member.
*/
#define DEV_FUNC(dev,func) xwidget_to_provider_fns(dev)->func
static xswitch_provider_t *
xwidget_to_provider_fns(vertex_hdl_t xconn)
{
vertex_hdl_t busv;
xswitch_info_t xswitch_info;
xswitch_provider_t provider_fns;
busv = hwgraph_connectpt_get(xconn_vhdl);
ASSERT(busv != GRAPH_VERTEX_NONE);
xswitch_info = xswitch_info_get(busv);
ASSERT(xswitch_info != NULL);
provider_fns = xswitch_info->xswitch_fns;
ASSERT(provider_fns != NULL);
return provider_fns;
}
#endif
#define XSWITCH_CENSUS_BIT(port) (1<<(port))
#define XSWITCH_CENSUS_PORT_MIN (0x0)
#define XSWITCH_CENSUS_PORT_MAX (0xF)
#define XSWITCH_CENSUS_PORTS (0x10)
#define XSWITCH_WIDGET_PRESENT(infop,port) ((infop)->census & XSWITCH_CENSUS_BIT(port))
...@@ -94,28 +62,20 @@ xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
vertex_hdl_t xwidget)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return;
-xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN] = xwidget;
+xswitch_info->vhdl[port] = xwidget;
}
vertex_hdl_t
xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return GRAPH_VERTEX_NONE;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return GRAPH_VERTEX_NONE;
-return xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN];
+return xswitch_info->vhdl[port];
}
/*
...@@ -128,28 +88,20 @@ xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
vertex_hdl_t master_vhdl)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return;
-xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN] = master_vhdl;
+xswitch_info->master_vhdl[port] = master_vhdl;
}
vertex_hdl_t
xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return GRAPH_VERTEX_NONE;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return GRAPH_VERTEX_NONE;
-return xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN];
+return xswitch_info->master_vhdl[port];
}
void
...@@ -170,9 +122,7 @@ xswitch_info_new(vertex_hdl_t xwidget)
NEW(xswitch_info);
xswitch_info->census = 0;
-for (port = XSWITCH_CENSUS_PORT_MIN;
-port <= XSWITCH_CENSUS_PORT_MAX;
-port++) {
+for (port = 0; port <= XSWITCH_CENSUS_PORT_MAX; port++) {
xswitch_info_vhdl_set(xswitch_info, port,
GRAPH_VERTEX_NONE);
...@@ -204,11 +154,6 @@ xswitch_info_link_is_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
int
xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return 0;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return 0;
...@@ -218,6 +163,5 @@ xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
int
xswitch_reset_link(vertex_hdl_t xconn_vhdl)
{
-return DEV_FUNC(xconn_vhdl, reset_link)
-(xconn_vhdl);
+return xbow_reset_link(xconn_vhdl);
}
...@@ -9,8 +9,6 @@
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-obj-y := probe.o setup.o sv.o bte.o irq.o mca.o \
-idle.o sn2/
+obj-y += probe.o setup.o bte.o irq.o mca.o idle.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o
...@@ -121,14 +121,17 @@ sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
static void
-sn_cpei_timer_handler(unsigned long dummy) {
+sn_cpei_timer_handler(unsigned long dummy)
+{
sn_cpei_handler(-1, NULL, NULL);
mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
}
void
-sn_init_cpei_timer() {
+sn_init_cpei_timer(void)
+{
+init_timer(&sn_cpei_timer);
sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
sn_cpei_timer.function = sn_cpei_timer_handler;
add_timer(&sn_cpei_timer);
}
...@@ -280,7 +280,7 @@ sn_setup(char **cmdline_p)
else
sn_rtc_cycles_per_second = ticks_per_sec;
-platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_PCE_VECTOR;
+platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
if ( IS_RUNNING_ON_SIMULATOR() )
......
@@ -8,7 +8,9 @@
  *
  */

-#include <asm/pgalloc.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/system.h>

 /**
  * sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -24,5 +26,12 @@ void
 sn_flush_all_caches(long flush_addr, long bytes)
 {
 	flush_icache_range(flush_addr, flush_addr+bytes);
+	/*
+	 * The last call may have returned before the caches
+	 * were actually flushed, so we call it again to make
+	 * sure.
+	 */
+	flush_icache_range(flush_addr, flush_addr+bytes);
 	mb();
 }
+EXPORT_SYMBOL(sn_flush_all_caches);
@@ -41,6 +41,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/mmzone.h>
+#include <linux/module.h>
 #include <asm/processor.h>
 #include <asm/irq.h>
@@ -214,6 +215,7 @@ sn_send_IPI_phys(long physid, int vector, int delivery_mode)
 	}
 }
+EXPORT_SYMBOL(sn_send_IPI_phys);

 /**
  * sn2_send_IPI - send an IPI to a processor
...
@@ -13,48 +13,25 @@
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <asm/machvec.h>
 #include <asm/sn/intr.h>
-#include <linux/mm.h>
 #include <asm/sn/sgi.h>
-extern vertex_hdl_t base_io_scsi_ctlr_vhdl[];
 #include <asm/sn/types.h>
-extern cnodeid_t master_node_get(devfs_handle_t vhdl);
 #include <asm/sn/arch.h>
-EXPORT_SYMBOL(base_io_scsi_ctlr_vhdl);
-EXPORT_SYMBOL(master_node_get);
+#include <asm/sn/bte.h>
+#include <asm/sal.h>
+#include <asm/sn/sn_sal.h>

 #ifdef CONFIG_IA64_SGI_SN_DEBUG
 EXPORT_SYMBOL(__pa_debug);
 EXPORT_SYMBOL(__va_debug);
 #endif

-/* Support IPIs for loaded modules. */
-EXPORT_SYMBOL(sn_send_IPI_phys);
-
-/* symbols referenced by partitioning modules */
-#include <asm/sn/bte.h>
 EXPORT_SYMBOL(bte_copy);
 EXPORT_SYMBOL(bte_unaligned_copy);
-#include <asm/sal.h>
 EXPORT_SYMBOL(ia64_sal);
-#include <asm/sn/sn_sal.h>
+EXPORT_SYMBOL(physical_node_map);
 EXPORT_SYMBOL(sal_lock);
-EXPORT_SYMBOL(sn_partid);
 EXPORT_SYMBOL(sn_local_partid);
-EXPORT_SYMBOL(sn_system_serial_number_string);
-EXPORT_SYMBOL(sn_partition_serial_number);
-EXPORT_SYMBOL(sn_mmiob);
-
-/* added by tduffy 04.08.01 to fix depmod issues */
-#include <linux/mmzone.h>
-extern nasid_t master_nasid;
-EXPORT_SYMBOL(master_nasid);
-EXPORT_SYMBOL(sn_flush_all_caches);
@@ -133,7 +133,7 @@ static __inline__ void *
 compat_alloc_user_space (long len)
 {
 	struct pt_regs *regs = ia64_task_regs(current);
-	return (void *) ((regs->r12 & -16) - len);
+	return (void *) (((regs->r12 & 0xffffffff) & -16) - len);
 }

 #endif /* _ASM_IA64_COMPAT_H */
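Aside (editor's illustration, not part of the patch): the new expression first truncates the compat task's stack pointer in r12 to its low 32 bits, then aligns down to a 16-byte boundary and reserves len bytes below it, so the scratch area stays addressable by a 32-bit process. A standalone user-space sketch of the arithmetic, with a made-up register value:

/* demo.c - the masking arithmetic only; r12 value is hypothetical */
#include <stdio.h>

int main(void)
{
	unsigned long r12 = 0x60000fffffffeb10UL;	/* hypothetical 64-bit sp */
	long len = 96;					/* scratch bytes wanted */

	/* truncate to 32 bits, align down to 16, reserve len bytes below */
	unsigned long scratch = ((r12 & 0xffffffff) & -16) - len;

	printf("scratch area at %#lx\n", scratch);	/* prints 0xffffeab0 */
	return 0;
}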
@@ -89,6 +89,7 @@
 #define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)

 #ifdef CONFIG_PREEMPT
+# include <linux/smp_lock.h>
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
...
@@ -38,7 +38,9 @@ typedef u8 ia64_vector;
 /*
  * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
  */
-#define IA64_PCE_VECTOR		0x1e	/* platform corrected error interrupt vector */
+#define IA64_CPEP_VECTOR	0x1c	/* corrected platform error polling vector */
+#define IA64_CMCP_VECTOR	0x1d	/* correctable machine-check polling vector */
+#define IA64_CPE_VECTOR		0x1e	/* corrected platform error interrupt vector */
 #define IA64_CMC_VECTOR		0x1f	/* correctable machine-check interrupt vector */
 /*
  * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
...
@@ -51,6 +51,7 @@
 #ifndef __ASSEMBLY__

+#ifdef CONFIG_IOSAPIC
 extern void __init iosapic_system_init (int pcat_compat);
 extern void __init iosapic_init (unsigned long address,
				  unsigned int gsi_base);
@@ -72,6 +73,14 @@ extern int __init iosapic_register_platform_intr (u32 int_type,
 extern unsigned int iosapic_version (char *addr);
 extern void iosapic_pci_fixup (int);
+#else
+#define iosapic_system_init(pcat_compat)			do { } while (0)
+#define iosapic_init(address,gsi_base)				do { } while (0)
+#define iosapic_register_intr(gsi,polarity,trigger)		(gsi)
+#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger)	do { } while (0)
+#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
+	polarity,trigger)					(gsi)
+#endif

 # endif /* !__ASSEMBLY__ */
 #endif /* __ASM_IA64_IOSAPIC_H */
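Aside (editor's note): the stub block follows the usual kernel pattern of compiling a feature away; with CONFIG_IOSAPIC unset the calls become no-ops or identity expressions, so shared callers need no #ifdefs of their own. A hypothetical call site, sketched to show the effect:

/* hypothetical caller; the function name and arguments are invented */
void
platform_irq_setup(unsigned long addr, unsigned int gsi_base)
{
	/* a real call when CONFIG_IOSAPIC=y; "do { } while (0)" otherwise */
	iosapic_init(addr, gsi_base);
}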
@@ -137,7 +137,9 @@ extern void ia64_slave_init_handler(void);
 extern irqreturn_t ia64_mca_rendez_int_handler(int,void *,struct pt_regs *);
 extern irqreturn_t ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *);
 extern irqreturn_t ia64_mca_cmc_int_handler(int,void *,struct pt_regs *);
+extern irqreturn_t ia64_mca_cmc_int_caller(int,void *,struct pt_regs *);
 extern irqreturn_t ia64_mca_cpe_int_handler(int,void *,struct pt_regs *);
+extern irqreturn_t ia64_mca_cpe_int_caller(int,void *,struct pt_regs *);
 extern int ia64_log_print(int,prfunc_t);
 extern void ia64_mca_cmc_vector_setup(void);
 extern int ia64_mca_check_errors(void);
...
@@ -45,6 +45,7 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 #define __get_cpu_var(var)	(*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))

 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
+extern void setup_per_cpu_areas (void);

 #else /* ! SMP */
@@ -56,10 +57,6 @@ extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 #define EXPORT_PER_CPU_SYMBOL(var)	EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)	EXPORT_SYMBOL_GPL(per_cpu__##var)

-/* ia64-specific part: */
-extern void setup_per_cpu_areas (void);
-
 /*
  * Be extremely careful when taking the address of this variable! Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
...
@@ -68,6 +68,13 @@ extern spinlock_t sal_lock;
	ia64_load_scratch_fpregs(__ia64_scn_fr);	\
 } while (0)

+# define SAL_CALL_REENTRANT(result,args...) do {	\
+	struct ia64_fpreg __ia64_scs_fr[6];		\
+	ia64_save_scratch_fpregs(__ia64_scs_fr);	\
+	__SAL_CALL(result, args);			\
+	ia64_load_scratch_fpregs(__ia64_scs_fr);	\
+} while (0)
+
 #define SAL_SET_VECTORS			0x01000000
 #define SAL_GET_STATE_INFO		0x01000001
 #define SAL_GET_STATE_INFO_SIZE		0x01000002
@@ -665,8 +672,8 @@ static inline s64
 ia64_sal_clear_state_info (u64 sal_info_type)
 {
	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
-		 0, 0, 0, 0, 0);
+	SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
+			   0, 0, 0, 0, 0);
	return isrv.status;
 }
@@ -678,8 +685,8 @@ static inline u64
 ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
 {
	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
-		 sal_info, 0, 0, 0, 0);
+	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+			   sal_info, 0, 0, 0, 0);
	if (isrv.status)
		return 0;
@@ -694,8 +701,8 @@ static inline u64
 ia64_sal_get_state_info_size (u64 sal_info_type)
 {
	struct ia64_sal_retval isrv;
-	SAL_CALL(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
-		 0, 0, 0, 0, 0);
+	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
+			   0, 0, 0, 0, 0);
	if (isrv.status)
		return 0;
	return isrv.v0;
...
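Aside (editor's sketch, assuming SAL_CALL elsewhere in this header serializes callers through sal_lock): SAL_CALL_REENTRANT deliberately takes no lock and only saves/restores the scratch FP registers, which is what lets the error-record helpers above be called from the new CMC/CPE polling paths without self-deadlocking on sal_lock. A hypothetical interrupt-path caller, with invented names:

/* hypothetical polling-path helper; cpe_log_size is illustrative only */
static u64 cpe_log_size;

static void
query_cpe_log_size(void)
{
	/*
	 * Safe even if the interrupted context holds sal_lock:
	 * ia64_sal_get_state_info_size() now uses SAL_CALL_REENTRANT.
	 */
	cpe_log_size = ia64_sal_get_state_info_size(SAL_INFO_TYPE_CPE);
}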
@@ -9,8 +9,6 @@
 #ifndef _ASM_IA64_SN_DMAMAP_H
 #define _ASM_IA64_SN_DMAMAP_H

-#include <asm/sn/sv.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -66,8 +64,6 @@ extern struct map *a32map[];
 extern int a24_mapsize;
 extern int a32_mapsize;

-extern sv_t dmamapout;
-
 #ifdef __cplusplus
 }
 #endif
...
@@ -99,12 +99,12 @@ extern int hwgraph_info_replace_LBL(vertex_hdl_t, char *, arbitrary_info_t,
 extern int hwgraph_info_get_exported_LBL(vertex_hdl_t, char *, int *, arbitrary_info_t *);
 extern int hwgraph_info_get_next_LBL(vertex_hdl_t, char *, arbitrary_info_t *,
				     labelcl_info_place_t *);
-extern int hwgraph_path_lookup(vertex_hdl_t, char *, vertex_hdl_t *, char **);
 extern int hwgraph_info_export_LBL(vertex_hdl_t, char *, int);
 extern int hwgraph_info_unexport_LBL(vertex_hdl_t, char *);
 extern int hwgraph_info_remove_LBL(vertex_hdl_t, char *, arbitrary_info_t *);
 extern char * vertex_to_name(vertex_hdl_t, char *, uint);
 extern graph_error_t hwgraph_vertex_unref(vertex_hdl_t);
+extern int init_hcl(void);

 #endif /* _ASM_IA64_SN_HCL_H */
@@ -14,7 +14,6 @@
 #include <asm/sn/vector.h>
 #include <asm/sn/addrs.h>
 #include <asm/atomic.h>
-#include <asm/sn/sv.h>

 /* L1 Target Addresses */
 /*
...
@@ -127,8 +127,7 @@ typedef struct irqpda_s irqpda_t;
 /*
  * Check if given a compact node id the corresponding node has all the
  * cpus disabled.
  */
-#define is_headless_node(cnode)	((cnode == CNODEID_NONE) || \
-				 (node_data(cnode)->active_cpu_count == 0))
+#define is_headless_node(cnode)	(!test_bit(cnode, &node_has_active_cpus))

 /*
  * Check if given a node vertex handle the corresponding node has all the
...
@@ -97,7 +97,6 @@ extern void setup_replication_mask(int maxnodes);
 /* init.c */
 extern cnodeid_t get_compact_nodeid(void);	/* get compact node id */
 extern void init_platform_nodepda(nodepda_t *npda, cnodeid_t node);
-extern void per_cpu_init(void);
 extern int is_fine_dirmode(void);
 extern void update_node_information(cnodeid_t);
@@ -177,7 +176,7 @@ typedef struct hubinfo_s {
	/* structures for PIO management */
	xwidgetnum_t h_widgetid;	/* my widget # (as viewed from xbow) */
	struct hub_piomap_s h_small_window_piomap[HUB_WIDGET_ID_MAX+1];
-	sv_t h_bwwait;			/* wait for big window to free */
+	wait_queue_head_t h_bwwait;	/* wait for big window to free */
	spinlock_t h_bwlock;		/* guard big window piomap's */
	spinlock_t h_crblock;		/* guard CRB error handling */
	int h_num_big_window_fixed;	/* count number of FIXED maps */
...
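Aside (editor's sketch of what the sv_t to wait_queue_head_t conversion looks like at a call site; the real changes sit in diffs collapsed above, and the predicate big_window_free() is invented): a sleeper on h_bwwait using the standard wait-queue idiom of that era:

/* hypothetical sleeper; returns with h_bwlock held */
static void
wait_for_big_window(struct hubinfo_s *hubinfo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&hubinfo->h_bwwait, &wait);
	for (;;) {
		/* set state before testing to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock(&hubinfo->h_bwlock);
		if (big_window_free(hubinfo))
			break;
		spin_unlock(&hubinfo->h_bwlock);
		schedule();	/* woken by wake_up(&h_bwwait) on release */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&hubinfo->h_bwwait, &wait);
}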
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This implementation of synchronization variables is heavily based on
* one done by Steve Lord <lord@sgi.com>
*
* Paul Cassella <pwc@sgi.com>
*/
#ifndef _ASM_IA64_SN_SV_H
#define _ASM_IA64_SN_SV_H
#include <linux/spinlock.h>
#include <asm/semaphore.h>
#ifndef ASSERT
#define ASSERT(x) do { \
if(!(x)) { \
printk(KERN_ERR "%s\n", "Assertion failed: " # x); \
BUG(); \
} \
} while(0)
#define _SV_ASSERT
#endif
typedef void sv_mon_lock_t;
typedef void (*sv_mon_unlock_func_t)(sv_mon_lock_t *lock);
/* sv_flags values: */
#define SV_ORDER_FIFO 0x001
#define SV_ORDER_FILO 0x002
#define SV_ORDER_LIFO SV_ORDER_FILO
/* If at some point one order becomes preferable to others, we can
switch to it if the caller of sv_init doesn't specify. */
#define SV_ORDER_DEFAULT SV_ORDER_FIFO
#define SV_ORDER_MASK 0x00f
#define SV_MON_SEMA 0x010
#define SV_MON_SPIN 0x020
#define SV_MON_MASK 0x0f0
/*
If the monitor lock can be acquired from interrupts. Note that this
is a superset of the cases in which the sv can be touched from
interrupts.
This is currently only valid when the monitor lock is a spinlock.
If this is used, sv_wait, sv_signal, and sv_broadcast must all be
called with interrupts disabled, which has to happen anyway to have
acquired the monitor spinlock.
*/
#define SV_INTS 0x100
/* ditto for bottom halves */
#define SV_BHS 0x200
/* sv_wait_flag values: */
#define SV_WAIT_SIG 0x001 /* Allow sv_wait to be interrupted by a signal */
typedef struct sv_s {
wait_queue_head_t sv_waiters;
sv_mon_lock_t *sv_mon_lock; /* Lock held for exclusive access to monitor. */
sv_mon_unlock_func_t sv_mon_unlock_func;
spinlock_t sv_lock; /* Spinlock protecting the sv itself. */
int sv_flags;
} sv_t;
#define DECLARE_SYNC_VARIABLE(sv, l, f) sv_t sv = sv_init(&sv, l, f)
/*
* @sv the sync variable to initialize
* @monitor_lock the lock enforcing exclusive running in the monitor
* @flags one of
* SV_MON_SEMA monitor_lock is a semaphore
* SV_MON_SPIN monitor_lock is a spinlock
* and a bitwise or of some subset of
* SV_INTS - the monitor lock can be acquired from interrupts (and
* hence, whenever we hold it, interrupts are disabled or
* we're in an interrupt.) This is only valid when
* SV_MON_SPIN is set.
*/
void sv_init(sv_t *sv, sv_mon_lock_t *monitor_lock, int flags);
/*
* Set SV_WAIT_SIG in sv_wait_flags to let the sv_wait be interrupted by signals.
*
* timeout is how long to wait before giving up, or 0 to wait
* indefinitely. It is given in jiffies, and is relative.
*
* The associated lock must be locked on entry. It is unlocked on return.
*
* Return values:
*
* n < 0 : interrupted, -n jiffies remaining on timeout, or -1 if timeout == 0
* n = 0 : timeout expired
* n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
*/
extern signed long sv_wait(sv_t *sv, int sv_wait_flags,
unsigned long timeout /* relative jiffies */);
static inline int sv_wait_compat(sv_t *sv, sv_mon_lock_t *lock, int sv_wait_flags,
unsigned long timeout, int sv_mon_type)
{
ASSERT(sv_mon_type == (sv->sv_flags & SV_MON_MASK));
if(sv->sv_mon_lock)
ASSERT(lock == sv->sv_mon_lock);
else
sv->sv_mon_lock = lock;
return sv_wait(sv, sv_wait_flags, timeout);
}
/* These work like Irix's sv_wait() and sv_wait_sig(), except the
caller must call the one corresponding to the type of the monitor
lock. */
#define sv_spin_wait(sv, lock) \
sv_wait_compat(sv, lock, 0, 0, SV_MON_SPIN)
#define sv_spin_wait_sig(sv, lock) \
sv_wait_compat(sv, lock, SV_WAIT_SIG, 0, SV_MON_SPIN)
#define sv_sema_wait(sv, lock) \
sv_wait_compat(sv, lock, 0, 0, SV_MON_SEMA)
#define sv_sema_wait_sig(sv, lock) \
sv_wait_compat(sv, lock, SV_WAIT_SIG, 0, SV_MON_SEMA)
/* These work as in Irix. */
void sv_signal(sv_t *sv);
void sv_broadcast(sv_t *sv);
/* This works as in Irix. */
void sv_destroy(sv_t *sv);
#ifdef _SV_ASSERT
#undef ASSERT
#undef _SV_ASSERT
#endif
#endif /* _ASM_IA64_SN_SV_H */
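To make the interface being retired here concrete, a minimal usage sketch (editor's illustration, with invented names res_lock/res_busy/res_free): a waiter sleeps on the sv under a spinlock monitor until another thread marks the resource free and signals.

static spinlock_t res_lock = SPIN_LOCK_UNLOCKED;	/* monitor lock */
static sv_t res_free;
static int res_busy;

void res_setup(void)
{
	sv_init(&res_free, &res_lock, SV_MON_SPIN | SV_ORDER_FIFO);
}

void res_acquire(void)
{
	spin_lock(&res_lock);
	while (res_busy) {
		/* sv_spin_wait() releases res_lock before sleeping */
		sv_spin_wait(&res_free, &res_lock);
		spin_lock(&res_lock);	/* re-enter the monitor to retest */
	}
	res_busy = 1;
	spin_unlock(&res_lock);
}

void res_release(void)
{
	spin_lock(&res_lock);
	res_busy = 0;
	spin_unlock(&res_lock);
	sv_signal(&res_free);	/* wake one waiter, FIFO order */
}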
@@ -19,7 +19,7 @@
 /*
  * User-level device driver visible types
  */
-typedef char xwidgetnum_t;	/* xtalk widget number (0..15) */
+typedef int xwidgetnum_t;	/* xtalk widget number (0..15) */

 #define XWIDGET_NONE		(-1)
...
@@ -2,10 +2,15 @@
 #define _ASM_IA64_STATFS_H

 /*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */

+#ifndef __KERNEL_STRICT_NAMES
+# include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
 /*
  * This is ugly --- we're already 64-bit, so just duplicate the definitions
  */
...
@@ -240,6 +240,18 @@ extern unsigned long __copy_user (void *to, const void *from, unsigned long count)
	__cu_len;									\
 })

+#define __copy_in_user(to, from, size)	\
+	__copy_user((to), (from), (size))
+
+static inline unsigned long
+copy_in_user (void *to, const void *from, unsigned long n)
+{
+	if (likely(access_ok(VERIFY_READ, from, n) &&
+		   access_ok(VERIFY_WRITE, to, n)))
+		n = __copy_user(to, from, n);
+	return n;
+}
+
 extern unsigned long __do_clear_user (void *, unsigned long);

 #define __clear_user(to,n)		\
...
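Aside (editor's illustration): copy_in_user() copies between two user-space buffers, validating both pointers first, and like copy_to_user() it returns the number of bytes that could not be copied. A hypothetical helper in the style of the compat layer that motivates it:

/* hypothetical compat-layer helper; move_user_word is an invented name */
static inline int
move_user_word(void *dst, const void *src)
{
	/* nonzero return means some bytes faulted and were not copied */
	if (copy_in_user(dst, src, sizeof(unsigned int)))
		return -EFAULT;
	return 0;
}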