Commit aca239b7 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
  [IA64] run rest drivers/misc/sgi-xp through scripts/Lindent
  [IA64] run some drivers/misc/sgi-xp through scripts/Lindent
  [IA64] move XP and XPC to drivers/misc/sgi-xp
  [IA64] minor irq handler cleanups
  [IA64] simplify notify hooks in mca.c
  [IA64] do notify DIE_MCA_MONARCH_PROCESS for each monarchs
  [IA64] disable interrupts on exit of ia64_trace_syscall
parents 16abef0e 2c2b94f9
@@ -266,17 +266,6 @@ config IOSAPIC
 	depends on !IA64_HP_SIM
 	default y
 
-config IA64_SGI_SN_XP
-	tristate "Support communication between SGI SSIs"
-	depends on IA64_GENERIC || IA64_SGI_SN2
-	select IA64_UNCACHED_ALLOCATOR
-	help
-	  An SGI machine can be divided into multiple Single System
-	  Images which act independently of each other and have
-	  hardware based memory protection from the others.  Enabling
-	  this feature will allow for direct communication between SSIs
-	  based on a network adapter and DMA messaging.
-
 config FORCE_MAX_ZONEORDER
 	int "MAX_ORDER (11 - 17)"  if !HUGETLB_PAGE
 	range 11 17  if !HUGETLB_PAGE
...
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
-		/* die_register->signr indicate if MCA is recoverable */
-		if (kdump_on_fatal_mca && !args->signr) {
+		/* *(nd->data) indicate if MCA is recoverable */
+		if (kdump_on_fatal_mca && !(*(nd->data))) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
 			machine_kdump_on_init();
...
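A note for readers tracing the kdump hook above: the MCA recoverability flag used to be read from the notifier's signr argument; it now arrives through the data pointer added to struct ia64_mca_notify_die (see the mca.h hunk at the end of this commit). Below is a minimal user-space sketch of that flow; the struct and function names are simplified stand-ins for the kernel types, not kernel code:

```c
#include <stdio.h>

/* Simplified stand-in for struct ia64_mca_notify_die (hypothetical,
 * for illustration only). */
struct mca_notify_die_sketch {
	int *monarch_cpu;
	int *data;	/* points at the MCA handler's 'recover' flag */
};

static int kdump_on_fatal_mca = 1;

/* Models the kdump notifier: start kdump only when the MCA was not
 * recovered, reading the flag through nd->data. */
static void kdump_check(struct mca_notify_die_sketch *nd)
{
	if (kdump_on_fatal_mca && !(*(nd->data)))
		printf("fatal MCA: would start kdump\n");
	else
		printf("recovered MCA: no kdump\n");
}

int main(void)
{
	int monarch_cpu = 3, recover = 0;
	struct mca_notify_die_sketch nd = {
		.monarch_cpu = &monarch_cpu, .data = &recover,
	};

	kdump_check(&nd);	/* recover == 0 -> kdump path */
	recover = 1;
	kdump_check(&nd);	/* recover == 1 -> no kdump */
	return 0;
}
```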
@@ -570,6 +570,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+(pUStk)	rsm psr.i				// disable interrupts
 	br.cond.sptk .work_pending_syscall_end
 
 strace_error:
...
@@ -109,6 +109,20 @@
 # define IA64_MCA_DEBUG(fmt...)
 #endif
 
+#define NOTIFY_INIT(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
+	    == NOTIFY_STOP) && ((spin) == 1))				\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
+	    == NOTIFY_STOP) && ((spin) == 1))				\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
 /* Used by mca_asm.S */
 DEFINE_PER_CPU(u64, ia64_mca_data);	/* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,9 +780,8 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	/* Mask all interrupts */
 	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 
 	/* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
 		cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 
 	/* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	int recover, cpu = smp_processor_id();
 	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
-		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
 	static atomic_t mca_count;
 	static cpumask_t mca_cpu;
@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1288,13 +1295,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		 * does not work.
 		 */
 		ia64_mca_wakeup_all();
-		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 	}
 
+	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
+
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
 
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
 
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
@@ -1643,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
 
 	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 		while (monarch_cpu == -1)
 			cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	/*
 	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1716,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@ ia64_mca_init(void)
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
-			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
...
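The mca.c hunks above fold a dozen open-coded `if (notify_die(...) == NOTIFY_STOP) ia64_mca_spin(...)` sequences into the NOTIFY_MCA()/NOTIFY_INIT() macros defined at the top of the file. A self-contained sketch of the same consolidation pattern, with stub versions of notify_die() and ia64_mca_spin() standing in for the kernel's:

```c
#include <stdio.h>

#define NOTIFY_STOP	1
#define NOTIFY_OK	0

/* Stand-ins for the kernel's notify_die() and ia64_mca_spin()
 * (hypothetical, for illustration only). */
static int notify_die(int event, const char *name, long arg)
{
	printf("notify_die(%d, %s, %ld)\n", event, name, arg);
	return event == 42 ? NOTIFY_STOP : NOTIFY_OK;
}

static void ia64_mca_spin(const char *func)
{
	printf("%s: spinning for the debugger\n", func);
}

/* The consolidated pattern: optionally spin when a notifier on the
 * chain returned NOTIFY_STOP. */
#define NOTIFY_MCA(event, arg, spin)				\
do {								\
	if ((notify_die((event), "MCA", (arg)) == NOTIFY_STOP)	\
	    && ((spin) == 1))					\
		ia64_mca_spin(__func__);			\
} while (0)

int main(void)
{
	NOTIFY_MCA(1, 0L, 1);	/* chain continues, no spin */
	NOTIFY_MCA(42, 0L, 1);	/* NOTIFY_STOP with spin requested */
	return 0;
}
```

The do { } while (0) wrapper keeps the macro usable as a single statement inside unbraced if/else bodies, which is why the kernel favors it for statement-like macros.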
@@ -5511,7 +5511,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 }
 
 static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
 {
 	struct task_struct *task;
 	pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@ pfm_interrupt_handler(int irq, void *arg)
 
 	start_cycles = ia64_get_itc();
 
-	ret = pfm_do_interrupt_handler(irq, arg, regs);
+	ret = pfm_do_interrupt_handler(arg, regs);
 
 	total_cycles = ia64_get_itc();
...
@@ -4,7 +4,7 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
 EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
 				   sn2/
 obj-$(CONFIG_IA64_GENERIC)	+= machvec.o
 obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xp.o
-xp-y				:= xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpc.o
-xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpnet.o
 obj-$(CONFIG_PCI_MSI)		+= msi_sn.o
@@ -187,8 +187,8 @@ void hub_error_init(struct hubdev_info *hubdev_info)
 {
 
 	if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
-			"SN_hub_error", (void *)hubdev_info)) {
-		printk("hub_error_init: Failed to request_irq for 0x%p\n",
+			"SN_hub_error", hubdev_info)) {
+		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
 			hubdev_info);
 		return;
 	}
...
@@ -655,7 +655,8 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
  *
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
- */ static u64
+ */
+static u64
 tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@ tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma
  *
  * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
  * defers processing to the SGI prom.
- */ static irqreturn_t
+ */
+static irqreturn_t
 tioce_error_intr_handler(int irq, void *arg)
 {
 	struct tioce_common *soft = arg;
...
@@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES
 	  driver (SCSI/ATA) which supports enclosures
 	  or a SCSI enclosure device (SES) to use these services.
 
+config SGI_XP
+	tristate "Support communication between SGI SSIs"
+	depends on IA64_GENERIC || IA64_SGI_SN2
+	select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	---help---
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others.  Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
 endif # MISC_DEVICES
@@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
 obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_SGI_XP)		+= sgi-xp/
+#
+# Makefile for SGI's XP devices.
+#
+obj-$(CONFIG_SGI_XP)		+= xp.o
+xp-y				:= xp_main.o xp_nofault.o
+obj-$(CONFIG_SGI_XP)		+= xpc.o
+xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
+obj-$(CONFIG_SGI_XP)		+= xpnet.o
@@ -3,18 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * External Cross Partition (XP) structures and defines.
  */
 
-
-#ifndef _ASM_IA64_SN_XP_H
-#define _ASM_IA64_SN_XP_H
+#ifndef _DRIVERS_MISC_SGIXP_XP_H
+#define _DRIVERS_MISC_SGIXP_XP_H
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
@@ -22,14 +19,12 @@
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
 #define DBUG_ON(condition)
 #endif
 
-
 /*
  * Define the maximum number of logically defined partitions the system
  * can support.  It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
  */
 #define XP_MAX_PARTITIONS	64
 
-
 /*
  * Define the number of u64s required to represent all the C-brick nasids
  * as a bitmap.  The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
 #define XP_NASID_MASK_BYTES	((XP_MAX_PHYSNODE_ID + 7) / 8)
 #define XP_NASID_MASK_WORDS	((XP_MAX_PHYSNODE_ID + 63) / 64)
 
-
 /*
  * Wrapper for bte_copy() that should it return a failure status will retry
  * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	bte_result_t ret;
 	u64 pdst = ia64_tpa(vdst);
 
-
 	/*
 	 * Ensure that the physically mapped memory is contiguous.
 	 *
@@ -87,16 +79,15 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	ret = bte_copy(src, pdst, len, mode, notification);
 	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-		if (!in_interrupt()) {
+		if (!in_interrupt())
 			cond_resched();
-		}
+
 		ret = bte_copy(src, pdst, len, mode, notification);
 	}
 
 	return ret;
 }
 
-
 /*
  * XPC establishes channel connections between the local partition and any
  * other partition that is currently up.  Over these channels, kernel-level
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 #error XPC_NCHANNELS exceeds MAXIMUM allowed.
 #endif
 
-
 /*
  * The format of an XPC message is as follows:
  *
@@ -160,12 +150,10 @@ struct xpc_msg {
 	u64 payload;		/* user defined portion of message */
 };
 
-
 #define XPC_MSG_PAYLOAD_OFFSET	(u64) (&((struct xpc_msg *)0)->payload)
 #define XPC_MSG_SIZE(_payload_size) \
 		L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
 
-
 /*
  * Define the return values and values passed to user's callout functions.
  * (It is important to add new value codes at the end just preceding
@@ -267,10 +255,9 @@ enum xpc_retval {
 	/* 115: BTE end */
 	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
 
-	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
+	xpcUnknownReason	/* 116: unknown reason - must be last in enum */
 };
 
-
 /*
  * Define the callout function types used by XPC to update the user on
  * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@ enum xpc_retval {
  * =====================+================================+=====================
  */
 
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *data, void *key);
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+				  int ch_number, void *data, void *key);
 
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *key);
+typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+				 int ch_number, void *key);
 
-
 /*
  * The following is a registration entry.  There is a global array of these,
@@ -398,50 +384,45 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  */
 struct xpc_registration {
 	struct mutex mutex;
 	xpc_channel_func func;	/* function to call */
 	void *key;		/* pointer to user's key */
 	u16 nentries;		/* #of msg entries in local msg queue */
 	u16 msg_size;		/* message queue's message size */
 	u32 assigned_limit;	/* limit on #of assigned kthreads */
 	u32 idle_limit;		/* limit on #of idle kthreads */
 } ____cacheline_aligned;
 
-
 #define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
 
-
 /* the following are valid xpc_allocate() flags */
 #define XPC_WAIT	0	/* wait flag */
 #define XPC_NOWAIT	1	/* no wait flag */
 
-
 struct xpc_interface {
-	void (*connect)(int);
-	void (*disconnect)(int);
-	enum xpc_retval (*allocate)(partid_t, int, u32, void **);
-	enum xpc_retval (*send)(partid_t, int, void *);
-	enum xpc_retval (*send_notify)(partid_t, int, void *,
-					xpc_notify_func, void *);
-	void (*received)(partid_t, int, void *);
-	enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+	void (*connect) (int);
+	void (*disconnect) (int);
+	enum xpc_retval (*allocate) (partid_t, int, u32, void **);
+	enum xpc_retval (*send) (partid_t, int, void *);
+	enum xpc_retval (*send_notify) (partid_t, int, void *,
+					xpc_notify_func, void *);
+	void (*received) (partid_t, int, void *);
+	enum xpc_retval (*partid_to_nasids) (partid_t, void *);
 };
 
-
 extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
 			      void (*)(int),
 			      enum xpc_retval (*)(partid_t, int, u32, void **),
 			      enum xpc_retval (*)(partid_t, int, void *),
-			      enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
-						  void *),
+			      enum xpc_retval (*)(partid_t, int, void *,
						  xpc_notify_func, void *),
 			      void (*)(partid_t, int, void *),
 			      enum xpc_retval (*)(partid_t, void *));
 extern void xpc_clear_interface(void);
 
 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
 				   u16, u32, u32);
 extern void xpc_disconnect(int);
 
 static inline enum xpc_retval
@@ -458,7 +439,7 @@ xpc_send(partid_t partid, int ch_number, void *payload)
 
 static inline enum xpc_retval
 xpc_send_notify(partid_t partid, int ch_number, void *payload,
 		xpc_notify_func func, void *key)
 {
 	return xpc_interface.send_notify(partid, ch_number, payload, func, key);
 }
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
-
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
-
-#endif /* _ASM_IA64_SN_XP_H */
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
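An aside on XPC_MSG_SIZE(), carried over unchanged in the header above: it rounds the fixed message header plus payload up to a whole L1 cache line. A user-space sketch of the computation, using offsetof() in place of the kernel's null-pointer-cast idiom; the struct layout and the 128-byte line size are illustrative assumptions, not the real xpc_msg definition:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed cache line size; the kernel gets this via L1_CACHE_ALIGN(). */
#define L1_CACHE_BYTES	128
#define L1_CACHE_ALIGN(x) \
	(((x) + (L1_CACHE_BYTES - 1)) & ~(uint64_t)(L1_CACHE_BYTES - 1))

/* Simplified message layout: a small fixed header, then the payload. */
struct xpc_msg_sketch {
	uint8_t flags;
	uint8_t number;
	uint16_t reserved;
	uint64_t payload;	/* first word of user-defined payload */
};

/* offsetof() is the portable spelling of the kernel's
 * (u64)(&((struct xpc_msg *)0)->payload) idiom. */
#define XPC_MSG_PAYLOAD_OFFSET	offsetof(struct xpc_msg_sketch, payload)
#define XPC_MSG_SIZE(_payload_size) \
	L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))

int main(void)
{
	printf("payload offset: %zu\n", (size_t)XPC_MSG_PAYLOAD_OFFSET);
	/* 8-byte header + 100-byte payload -> rounded up to 128 */
	printf("msg size for 100-byte payload: %llu\n",
	       (unsigned long long)XPC_MSG_SIZE(100));
	return 0;
}
```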
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition (XP) base.
  *
@@ -15,58 +14,64 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S.  The target of the nofault read is
+ * defined here.
  */
-u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
 
+u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xpc_retval
+xpc_notloaded(void)
+{
+	return xpcNotLoaded;
+}
 
 struct xpc_interface xpc_interface = {
-	(void (*)(int)) xpc_notloaded,
-	(void (*)(int)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
-			xpc_notloaded,
-	(void (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+	(void (*)(int))xpc_notloaded,
+	(void (*)(int))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+	    xpc_notloaded,
+	(void (*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
+EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
  * XPC calls this when it (the XPC module) has been loaded.
  */
 void
-xpc_set_interface(void (*connect)(int),
-		void (*disconnect)(int),
-		enum xpc_retval (*allocate)(partid_t, int, u32, void **),
-		enum xpc_retval (*send)(partid_t, int, void *),
-		enum xpc_retval (*send_notify)(partid_t, int, void *,
-				xpc_notify_func, void *),
-		void (*received)(partid_t, int, void *),
-		enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+xpc_set_interface(void (*connect) (int),
+		  void (*disconnect) (int),
+		  enum xpc_retval (*allocate) (partid_t, int, u32, void **),
+		  enum xpc_retval (*send) (partid_t, int, void *),
+		  enum xpc_retval (*send_notify) (partid_t, int, void *,
+						  xpc_notify_func, void *),
+		  void (*received) (partid_t, int, void *),
+		  enum xpc_retval (*partid_to_nasids) (partid_t, void *))
 {
 	xpc_interface.connect = connect;
 	xpc_interface.disconnect = disconnect;
@@ -76,7 +81,7 @@ xpc_set_interface(void (*connect)(int),
 	xpc_interface.received = received;
 	xpc_interface.partid_to_nasids = partid_to_nasids;
 }
+EXPORT_SYMBOL_GPL(xpc_set_interface);
 
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
@@ -84,20 +89,21 @@ xpc_set_interface(void (*connect)(int),
 void
 xpc_clear_interface(void)
 {
-	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
-					void **)) xpc_notloaded;
-	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
-					xpc_notify_func, void *)) xpc_notloaded;
-	xpc_interface.received = (void (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
-					xpc_notloaded;
+	xpc_interface.connect = (void (*)(int))xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+						     void **))xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+							xpc_notify_func,
+							void *))xpc_notloaded;
+	xpc_interface.received = (void (*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+	    xpc_notloaded;
 }
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
 /*
  * Register for automatic establishment of a channel connection whenever
@@ -125,11 +131,10 @@ xpc_clear_interface(void)
  */
 enum xpc_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	    u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 	DBUG_ON(payload_size == 0 || nentries == 0);
 	DBUG_ON(func == NULL);
@@ -137,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	registration = &xpc_registrations[ch_number];
 
-	if (mutex_lock_interruptible(&registration->mutex) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
 		return xpcInterrupted;
-	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
@@ -161,7 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	return xpcSuccess;
 }
+EXPORT_SYMBOL_GPL(xpc_connect);
 
 /*
  * Remove the registration for automatic connection of the specified channel
@@ -181,7 +185,6 @@ xpc_disconnect(int ch_number)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	registration = &xpc_registrations[ch_number];
@@ -213,19 +216,17 @@ xpc_disconnect(int ch_number)
 
 	return;
 }
+EXPORT_SYMBOL_GPL(xpc_disconnect);
 
 int __init
 xp_init(void)
 {
 	int ret, ch_number;
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	/*
 	 * Register a nofault code region which performs a cross-partition
@@ -236,55 +237,43 @@ xp_init(void)
 	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
 	 * work around).
 	 */
-	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-						err_func_addr, 1, 1)) != 0) {
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
 		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
 		       ret);
 	}
+
 	/*
 	 * Setup the nofault PIO read target. (There is no special reason why
 	 * SH_IPI_ACCESS was selected.)
 	 */
-	if (is_shub2()) {
+	if (is_shub2())
 		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-	} else {
+	else
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-	}
 
 	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);
-	}
 
 	return 0;
 }
 
 module_init(xp_init);
 
 void __exit
 xp_exit(void)
 {
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
 	/* unregister the PIO read nofault code region */
-	(void) sn_register_nofault_code(func_addr, err_func_addr,
-					err_func_addr, 1, 0);
+	(void)sn_register_nofault_code(func_addr, err_func_addr,
				       err_func_addr, 1, 0);
 }
 
 module_exit(xp_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
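With the symbols now exported via EXPORT_SYMBOL_GPL() next to their definitions, a module can register a channel callout with xpc_connect() and tear it down with xpc_disconnect(). The sketch below is hypothetical, built only from the signatures in xp.h shown earlier; the channel number, payload struct, queue depth of 128, and the zero kthread limits are made-up illustrative values, not anything from this commit:

```c
#include <linux/module.h>
#include <linux/errno.h>
#include "xp.h"	/* drivers/misc/sgi-xp/xp.h, as moved by this commit */

#define DEMO_CH 3	/* hypothetical channel number (< XPC_NCHANNELS) */

struct demo_payload {
	u64 seq;	/* made-up payload layout */
};

/* Callout invoked by XPC for connection events and incoming messages. */
static void
demo_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		  void *data, void *key)
{
	if (reason == xpcMsgReceived)
		printk(KERN_INFO "xp demo: message from partition %d\n",
		       (int)partid);
}

static int __init demo_init(void)
{
	/* 128 queue entries; 0 kthread limits are assumed defaults here */
	if (xpc_connect(DEMO_CH, demo_channel_func, NULL,
			sizeof(struct demo_payload), 128, 0, 0) != xpcSuccess)
		return -ENODEV;
	return 0;
}

static void __exit demo_exit(void)
{
	xpc_disconnect(DEMO_CH);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```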
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * The xp_nofault_PIOR function takes a pointer to a remote PIO register
  * and attempts to load and consume a value from it.  This function
...
@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
 struct ia64_mca_notify_die {
 	struct ia64_sal_os_state *sos;
 	int *monarch_cpu;
+	int *data;
 };
 
 DECLARE_PER_CPU(u64, ia64_mca_pal_base);
...