Commit aca239b7 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
  [IA64] run rest of drivers/misc/sgi-xp through scripts/Lindent
  [IA64] run some drivers/misc/sgi-xp through scripts/Lindent
  [IA64] move XP and XPC to drivers/misc/sgi-xp
  [IA64] minor irq handler cleanups
  [IA64] simplify notify hooks in mca.c
  [IA64] do notify DIE_MCA_MONARCH_PROCESS for each monarchs
  [IA64] disable interrupts on exit of ia64_trace_syscall
parents 16abef0e 2c2b94f9
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -266,17 +266,6 @@ config IOSAPIC
 	depends on !IA64_HP_SIM
 	default y
 
-config IA64_SGI_SN_XP
-	tristate "Support communication between SGI SSIs"
-	depends on IA64_GENERIC || IA64_SGI_SN2
-	select IA64_UNCACHED_ALLOCATOR
-	help
-	  An SGI machine can be divided into multiple Single System
-	  Images which act independently of each other and have
-	  hardware based memory protection from the others. Enabling
-	  this feature will allow for direct communication between SSIs
-	  based on a network adapter and DMA messaging.
-
 config FORCE_MAX_ZONEORDER
 	int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
 	range 11 17 if !HUGETLB_PAGE
...
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
-		/* die_register->signr indicate if MCA is recoverable */
-		if (kdump_on_fatal_mca && !args->signr) {
+		/* *(nd->data) indicate if MCA is recoverable */
+		if (kdump_on_fatal_mca && !(*(nd->data))) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
 			machine_kdump_on_init();
...
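The crash.c hunk above reads the MCA recoverability flag through nd->data, which ia64_mca_handler now wires to its local "recover" variable (see the mca.c hunks below). A minimal userspace sketch of that pointer-through-notifier-argument pattern; all names here are illustrative stand-ins, not the kernel's types:

	/*
	 * Sketch: the handler exports a local flag to notifier callbacks by
	 * planting a pointer to it in the argument struct it passes along.
	 */
	#include <stdio.h>

	struct notify_die_args {
		int *monarch_cpu;	/* shared monarch CPU id */
		int *data;		/* points at the handler's "recover" flag */
	};

	/* callback: decide whether a crash dump would be started */
	static void kdump_notifier(struct notify_die_args *nd, int kdump_on_fatal_mca)
	{
		if (kdump_on_fatal_mca && !(*nd->data))
			printf("MCA not recoverable: would start kdump\n");
		else
			printf("MCA recoverable: no dump\n");
	}

	int main(void)
	{
		int monarch_cpu = 0, recover = 0;
		struct notify_die_args nd = { &monarch_cpu, &recover };

		kdump_notifier(&nd, 1);	/* recover == 0 -> would dump */
		recover = 1;
		kdump_notifier(&nd, 1);	/* recover == 1 -> no dump */
		return 0;
	}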
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -570,6 +570,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+(pUStk)	rsm psr.i				// disable interrupts
 	br.cond.sptk .work_pending_syscall_end
 
 strace_error:
...
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -109,6 +109,20 @@
 # define IA64_MCA_DEBUG(fmt...)
 #endif
 
+#define NOTIFY_INIT(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
+	    == NOTIFY_STOP) && ((spin) == 1))				\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
+	    == NOTIFY_STOP) && ((spin) == 1))				\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
 /* Used by mca_asm.S */
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,9 +780,8 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	/* Mask all interrupts */
 	local_irq_save(flags);
 
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
 		cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	int recover, cpu = smp_processor_id();
 	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
-		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
 	static atomic_t mca_count;
 	static cpumask_t mca_cpu;
@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1288,13 +1295,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		 * does not work.
 		 */
 		ia64_mca_wakeup_all();
-		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 	}
 
+	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
+
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
 
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
 
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
@@ -1643,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
 
 	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 		while (monarch_cpu == -1)
 			cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	/*
 	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1716,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@ ia64_mca_init(void)
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
-			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
...
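The NOTIFY_MCA()/NOTIFY_INIT() macros above fold the repeated notify_die()/NOTIFY_STOP/ia64_mca_spin() sequence into a single statement-like macro. A compilable userspace sketch of the same do { } while (0) shape; the stubs below are illustrative and are not the kernel's notify_die() or ia64_mca_spin():

	#include <stdio.h>

	#define NOTIFY_OK   0
	#define NOTIFY_STOP 1

	static int stub_notify_die(int event, const char *name, long arg)
	{
		printf("notify_die(%d, \"%s\", %ld)\n", event, name, arg);
		return event == 42 ? NOTIFY_STOP : NOTIFY_OK;	/* fake veto */
	}

	static void stub_spin(const char *fn)
	{
		printf("%s: would spin here\n", fn);	/* the kernel spins forever */
	}

	/* do { } while (0) makes the macro behave like one statement */
	#define NOTIFY(event, name, arg, spin)				\
	do {								\
		if ((stub_notify_die((event), (name), (arg))		\
		     == NOTIFY_STOP) && ((spin) == 1))			\
			stub_spin(__func__);				\
	} while (0)

	int main(void)
	{
		NOTIFY(1, "MCA", 0L, 1);	/* chain says NOTIFY_OK: no spin */
		NOTIFY(42, "MCA", 0L, 1);	/* chain vetoes: spin */
		NOTIFY(42, "INIT", 0L, 0);	/* veto, but spin not requested */
		return 0;
	}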
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5511,7 +5511,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 }
 
 static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
 {
 	struct task_struct *task;
 	pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@ pfm_interrupt_handler(int irq, void *arg)
 
 	start_cycles = ia64_get_itc();
 
-	ret = pfm_do_interrupt_handler(irq, arg, regs);
+	ret = pfm_do_interrupt_handler(arg, regs);
 
 	total_cycles = ia64_get_itc();
...
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,7 +4,7 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
 EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
 	 sn2/
 obj-$(CONFIG_IA64_GENERIC)	+= machvec.o
 obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xp.o
-xp-y				:= xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpc.o
-xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpnet.o
 obj-$(CONFIG_PCI_MSI)		+= msi_sn.o
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -187,8 +187,8 @@ void hub_error_init(struct hubdev_info *hubdev_info)
 {
 
 	if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
-			"SN_hub_error", (void *)hubdev_info)) {
-		printk("hub_error_init: Failed to request_irq for 0x%p\n",
+			"SN_hub_error", hubdev_info)) {
+		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
 		       hubdev_info);
 		return;
 	}
...
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -655,7 +655,8 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
  *
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
- */ static u64
+ */
+static u64
 tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@ tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma
  *
  * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
  * defers processing to the SGI prom.
- */ static irqreturn_t
+ */
+static irqreturn_t
 tioce_error_intr_handler(int irq, void *arg)
 {
 	struct tioce_common *soft = arg;
...
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES
 	  driver (SCSI/ATA) which supports enclosures
 	  or a SCSI enclosure device (SES) to use these services.
 
+config SGI_XP
+	tristate "Support communication between SGI SSIs"
+	depends on IA64_GENERIC || IA64_SGI_SN2
+	select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	---help---
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others. Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
 endif # MISC_DEVICES
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
 obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_SGI_XP)		+= sgi-xp/
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for SGI's XP devices.
+#
+obj-$(CONFIG_SGI_XP)		+= xp.o
+xp-y				:= xp_main.o xp_nofault.o
+obj-$(CONFIG_SGI_XP)		+= xpc.o
+xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
+obj-$(CONFIG_SGI_XP)		+= xpnet.o
--- a/include/asm-ia64/sn/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -3,18 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * External Cross Partition (XP) structures and defines.
  */
 
-
-#ifndef _ASM_IA64_SN_XP_H
-#define _ASM_IA64_SN_XP_H
+#ifndef _DRIVERS_MISC_SGIXP_XP_H
+#define _DRIVERS_MISC_SGIXP_XP_H
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
@@ -22,14 +19,12 @@
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
 #define DBUG_ON(condition)
 #endif
 
-
 /*
  * Define the maximum number of logically defined partitions the system
  * can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
  */
 #define XP_MAX_PARTITIONS	64
 
-
 /*
  * Define the number of u64s required to represent all the C-brick nasids
  * as a bitmap.  The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
 #define XP_NASID_MASK_BYTES	((XP_MAX_PHYSNODE_ID + 7) / 8)
 #define XP_NASID_MASK_WORDS	((XP_MAX_PHYSNODE_ID + 63) / 64)
 
-
 /*
  * Wrapper for bte_copy() that should it return a failure status will retry
  * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	bte_result_t ret;
 	u64 pdst = ia64_tpa(vdst);
 
-
 	/*
 	 * Ensure that the physically mapped memory is contiguous.
 	 *
@@ -87,16 +79,15 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	ret = bte_copy(src, pdst, len, mode, notification);
 	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-		if (!in_interrupt()) {
+		if (!in_interrupt())
 			cond_resched();
-		}
+
 		ret = bte_copy(src, pdst, len, mode, notification);
 	}
 
 	return ret;
 }
 
-
 /*
  * XPC establishes channel connections between the local partition and any
  * other partition that is currently up. Over these channels, kernel-level
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 #error XPC_NCHANNELS exceeds MAXIMUM allowed.
 #endif
 
-
 /*
  * The format of an XPC message is as follows:
  *
@@ -160,12 +150,10 @@ struct xpc_msg {
 	u64 payload;		/* user defined portion of message */
 };
 
-
 #define XPC_MSG_PAYLOAD_OFFSET	(u64) (&((struct xpc_msg *)0)->payload)
 #define XPC_MSG_SIZE(_payload_size) \
 		L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
 
-
 /*
  * Define the return values and values passed to user's callout functions.
  * (It is important to add new value codes at the end just preceding
@@ -267,10 +255,9 @@ enum xpc_retval {
 	/* 115: BTE end */
 	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
 
-	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
+	xpcUnknownReason	/* 116: unknown reason - must be last in enum */
 };
 
-
 /*
  * Define the callout function types used by XPC to update the user on
  * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@ enum xpc_retval {
  * =====================+================================+=====================
  */
 
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *data, void *key);
-
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *key);
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+				  int ch_number, void *data, void *key);
 
+typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+				 int ch_number, void *key);
 
 /*
  * The following is a registration entry. There is a global array of these,
@@ -398,50 +384,45 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  */
 struct xpc_registration {
 	struct mutex mutex;
 	xpc_channel_func func;	/* function to call */
 	void *key;		/* pointer to user's key */
 	u16 nentries;		/* #of msg entries in local msg queue */
 	u16 msg_size;		/* message queue's message size */
 	u32 assigned_limit;	/* limit on #of assigned kthreads */
 	u32 idle_limit;		/* limit on #of idle kthreads */
 } ____cacheline_aligned;
 
 #define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
 
 /* the following are valid xpc_allocate() flags */
 #define XPC_WAIT	0	/* wait flag */
 #define XPC_NOWAIT	1	/* no wait flag */
 
 struct xpc_interface {
-	void (*connect)(int);
-	void (*disconnect)(int);
-	enum xpc_retval (*allocate)(partid_t, int, u32, void **);
-	enum xpc_retval (*send)(partid_t, int, void *);
-	enum xpc_retval (*send_notify)(partid_t, int, void *,
-						xpc_notify_func, void *);
-	void (*received)(partid_t, int, void *);
-	enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+	void (*connect) (int);
+	void (*disconnect) (int);
+	enum xpc_retval (*allocate) (partid_t, int, u32, void **);
+	enum xpc_retval (*send) (partid_t, int, void *);
+	enum xpc_retval (*send_notify) (partid_t, int, void *,
+					xpc_notify_func, void *);
+	void (*received) (partid_t, int, void *);
+	enum xpc_retval (*partid_to_nasids) (partid_t, void *);
 };
 
 extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
 		void (*)(int),
 		enum xpc_retval (*)(partid_t, int, u32, void **),
 		enum xpc_retval (*)(partid_t, int, void *),
-		enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
-			void *),
+		enum xpc_retval (*)(partid_t, int, void *,
+				    xpc_notify_func, void *),
 		void (*)(partid_t, int, void *),
 		enum xpc_retval (*)(partid_t, void *));
 extern void xpc_clear_interface(void);
 
 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
 				   u16, u32, u32);
 extern void xpc_disconnect(int);
 
 static inline enum xpc_retval
@@ -458,7 +439,7 @@ xpc_send(partid_t partid, int ch_number, void *payload)
 static inline enum xpc_retval
 xpc_send_notify(partid_t partid, int ch_number, void *payload,
 		xpc_notify_func func, void *key)
 {
 	return xpc_interface.send_notify(partid, ch_number, payload, func, key);
 }
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
-
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
-
-#endif /* _ASM_IA64_SN_XP_H */
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
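xp.h computes XPC_MSG_PAYLOAD_OFFSET with the classic null-pointer cast and rounds XPC_MSG_SIZE up to a cache line with L1_CACHE_ALIGN. A standalone sketch of that arithmetic using the standard offsetof(); the struct fields and the 128-byte line size are assumptions made for illustration, not the header's actual layout:

	#include <stdio.h>
	#include <stddef.h>

	#define L1_CACHE_BYTES 128UL	/* assumed line size for the example */
	#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

	struct msg_sketch {
		unsigned char flags;		/* assumed header fields */
		unsigned char reserved[7];
		long long number;
		unsigned long long payload;	/* user payload starts here */
	};

	/* offsetof() is equivalent to the header's (&((struct xpc_msg *)0)->payload) */
	#define MSG_PAYLOAD_OFFSET offsetof(struct msg_sketch, payload)
	#define MSG_SIZE(_payload_size) \
		L1_CACHE_ALIGN(MSG_PAYLOAD_OFFSET + (_payload_size))

	int main(void)
	{
		/* 16-byte header + 100-byte payload rounds up to one 128-byte line */
		printf("payload offset: %zu\n", MSG_PAYLOAD_OFFSET);
		printf("size for 100-byte payload: %lu\n", MSG_SIZE(100UL));
		/* 112 payload bytes fit exactly; 113 spill into a second line */
		printf("size for 113-byte payload: %lu\n", MSG_SIZE(113UL));
		return 0;
	}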
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition (XP) base.
  *
@@ -15,58 +14,64 @@
  *
  */
 
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S.  The target of the nofault read is
+ * defined here.
  */
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+
 u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xpc_retval
+xpc_notloaded(void)
+{
+	return xpcNotLoaded;
+}
 
 struct xpc_interface xpc_interface = {
-	(void (*)(int)) xpc_notloaded,
-	(void (*)(int)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
-							xpc_notloaded,
-	(void (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+	(void (*)(int))xpc_notloaded,
+	(void (*)(int))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+	    xpc_notloaded,
+	(void (*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
+EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
  * XPC calls this when it (the XPC module) has been loaded.
  */
 void
-xpc_set_interface(void (*connect)(int),
-		void (*disconnect)(int),
-		enum xpc_retval (*allocate)(partid_t, int, u32, void **),
-		enum xpc_retval (*send)(partid_t, int, void *),
-		enum xpc_retval (*send_notify)(partid_t, int, void *,
-						xpc_notify_func, void *),
-		void (*received)(partid_t, int, void *),
-		enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+xpc_set_interface(void (*connect) (int),
+		  void (*disconnect) (int),
+		  enum xpc_retval (*allocate) (partid_t, int, u32, void **),
+		  enum xpc_retval (*send) (partid_t, int, void *),
+		  enum xpc_retval (*send_notify) (partid_t, int, void *,
+						  xpc_notify_func, void *),
+		  void (*received) (partid_t, int, void *),
+		  enum xpc_retval (*partid_to_nasids) (partid_t, void *))
 {
 	xpc_interface.connect = connect;
 	xpc_interface.disconnect = disconnect;
@@ -76,7 +81,7 @@ xpc_set_interface(void (*connect)(int),
 	xpc_interface.received = received;
 	xpc_interface.partid_to_nasids = partid_to_nasids;
 }
+EXPORT_SYMBOL_GPL(xpc_set_interface);
 
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
@@ -84,20 +89,21 @@ xpc_set_interface(void (*connect)(int),
 void
 xpc_clear_interface(void)
 {
-	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
-					void **)) xpc_notloaded;
-	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
-					xpc_notify_func, void *)) xpc_notloaded;
+	xpc_interface.connect = (void (*)(int))xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+						     void **))xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+							xpc_notify_func,
+							void *))xpc_notloaded;
 	xpc_interface.received = (void (*)(partid_t, int, void *))
 	    xpc_notloaded;
-	xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
-					xpc_notloaded;
+	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+	    xpc_notloaded;
 }
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
 /*
  * Register for automatic establishment of a channel connection whenever
@@ -125,11 +131,10 @@ xpc_clear_interface(void)
  */
 enum xpc_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	    u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 	DBUG_ON(payload_size == 0 || nentries == 0);
 	DBUG_ON(func == NULL);
@@ -137,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	registration = &xpc_registrations[ch_number];
 
-	if (mutex_lock_interruptible(&registration->mutex) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
 		return xpcInterrupted;
-	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
@@ -161,7 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	return xpcSuccess;
 }
+EXPORT_SYMBOL_GPL(xpc_connect);
 
 /*
  * Remove the registration for automatic connection of the specified channel
@@ -181,7 +185,6 @@ xpc_disconnect(int ch_number)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	registration = &xpc_registrations[ch_number];
@@ -213,19 +216,17 @@ xpc_disconnect(int ch_number)
 	return;
 }
+EXPORT_SYMBOL_GPL(xpc_disconnect);
 int __init
 xp_init(void)
 {
 	int ret, ch_number;
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	/*
 	 * Register a nofault code region which performs a cross-partition
@@ -236,55 +237,43 @@ xp_init(void)
 	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
 	 * work around).
 	 */
-	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-				err_func_addr, 1, 1)) != 0) {
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
 		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
 			ret);
 	}
+
 	/*
 	 * Setup the nofault PIO read target. (There is no special reason why
 	 * SH_IPI_ACCESS was selected.)
 	 */
-	if (is_shub2()) {
+	if (is_shub2())
 		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-	} else {
+	else
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-	}
 
 	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);
-	}
 
 	return 0;
 }
-
 module_init(xp_init);
 
 void __exit
 xp_exit(void)
 {
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
 	/* unregister the PIO read nofault code region */
-	(void) sn_register_nofault_code(func_addr, err_func_addr,
-			err_func_addr, 1, 0);
+	(void)sn_register_nofault_code(func_addr, err_func_addr,
+				       err_func_addr, 1, 0);
 }
-
 module_exit(xp_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
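xp_main.c points every xpc_interface slot at the single xpc_notloaded() stub until XPC registers real handlers through xpc_set_interface(). A userspace sketch of that stub-table pattern with illustrative names; note that calling through a mismatched function-pointer type is formally undefined in ISO C and relies, as the kernel code does, on the ABI tolerating the cast:

	#include <stdio.h>

	enum retval { xpSuccess, xpNotLoaded };

	struct iface {
		enum retval (*send)(int partid, int ch_number, void *payload);
	};

	static enum retval notloaded(void)
	{
		return xpNotLoaded;
	}

	static enum retval real_send(int partid, int ch_number, void *payload)
	{
		printf("send to partition %d, channel %d\n", partid, ch_number);
		return xpSuccess;
	}

	/* every slot aims at the one stub until registration time */
	static struct iface iface = {
		.send = (enum retval (*)(int, int, void *))notloaded,
	};

	int main(void)
	{
		if (iface.send(1, 0, NULL) == xpNotLoaded)
			printf("module not loaded yet\n");

		iface.send = real_send;	/* like xpc_set_interface() */
		iface.send(1, 0, NULL);
		return 0;
	}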
--- a/arch/ia64/sn/kernel/xp_nofault.S
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * The xp_nofault_PIOR function takes a pointer to a remote PIO register
  * and attempts to load and consume a value from it.  This function
...
--- a/include/asm-ia64/sn/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -3,17 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) structures and macros.
  */
 
-#ifndef _ASM_IA64_SN_XPC_H
-#define _ASM_IA64_SN_XPC_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XPC_H
+#define _DRIVERS_MISC_SGIXP_XPC_H
 
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
@@ -27,8 +25,7 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/mspec.h>
 #include <asm/sn/shub_mmr.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 /*
  * XPC Version numbers consist of a major and minor number.  XPC can always
@@ -39,7 +36,6 @@
 #define XPC_VERSION_MAJOR(_v)		((_v) >> 4)
 #define XPC_VERSION_MINOR(_v)		((_v) & 0xf)
 
-
 /*
  * The next macros define word or bit representations for given
  * C-brick nasid in either the SAL provided bit array representing
@@ -67,7 +63,6 @@
 /* define the process name of the discovery thread */
 #define XPC_DISCOVERY_THREAD_NAME	"xpc_discovery"
 
-
 /*
  * the reserved page
  *
@@ -115,16 +110,16 @@ struct xpc_rsvd_page {
 	u8 partid;		/* SAL: partition ID */
 	u8 version;
 	u8 pad1[6];		/* align to next u64 in cacheline */
-	volatile u64 vars_pa;
+	u64 vars_pa;		/* physical address of struct xpc_vars */
 	struct timespec stamp;	/* time when reserved page was setup by XPC */
 	u64 pad2[9];		/* align to last u64 in cacheline */
 	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
 };
 
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */
 
 #define XPC_SUPPORTS_RP_STAMP(_version) \
-			(_version >= _XPC_VERSION(1,1))
+		(_version >= _XPC_VERSION(1, 1))
 
 /*
  * compare stamps - the return value is:
@@ -138,14 +133,13 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
 {
 	int ret;
 
-	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+	ret = stamp1->tv_sec - stamp2->tv_sec;
+	if (ret == 0)
 		ret = stamp1->tv_nsec - stamp2->tv_nsec;
-	}
+
 	return ret;
 }
 
-
 /*
  * Define the structures by which XPC variables can be exported to other
  * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -172,11 +166,10 @@ struct xpc_vars {
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
 };
 
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
 
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-			(_version >= _XPC_VERSION(3,1))
+		(_version >= _XPC_VERSION(3, 1))
 
 static inline int
 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
@@ -193,7 +186,7 @@ xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask | (1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-							old_mask);
+		 old_mask);
 }
 
 static inline void
@@ -205,10 +198,9 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask & ~(1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-							old_mask);
+		 old_mask);
 }
 
-
 /*
  * The AMOs page consists of a number of AMO variables which are divided into
  * four groups, The first two groups are used to identify an IRQ's sender.
@@ -222,7 +214,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
 #define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
-
 /*
  * The following structure describes the per partition specific variables.
  *
@@ -234,7 +225,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	volatile u64 magic;
+	u64 magic;
 
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -257,20 +248,20 @@ struct xpc_vars_part {
  * MAGIC2 indicates that this partition has pulled the remote partititions
  * per partition variables that pertain to this partition.
  */
 #define XPC_VP_MAGIC1	0x0053524156435058L	/* 'XPCVARS\0'L (little endian) */
 #define XPC_VP_MAGIC2	0x0073726176435058L	/* 'XPCvars\0'L (little endian) */
 
 /* the reserved page sizes and offsets */
 
 #define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
 #define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
 
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)	((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp)	(struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
+#define XPC_RP_VARS(_rp)	((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+				    xp_nasid_mask_words))
+#define XPC_RP_VARS_PART(_rp)	((struct xpc_vars_part *) \
+				    ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
 /*
  * Functions registered by add_timer() or called by kernel_thread() only
@@ -285,21 +276,17 @@ struct xpc_vars_part {
 #define XPC_UNPACK_ARG1(_args)	(((u64) _args) & 0xffffffff)
 #define XPC_UNPACK_ARG2(_args)	((((u64) _args) >> 32) & 0xffffffff)
 
-
 /*
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	volatile s64 get;	/* Get value */
-	volatile s64 put;	/* Put value */
+	s64 get;		/* Get value */
+	s64 put;		/* Put value */
 };
 
 #define XPC_GP_SIZE \
 		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
 
-
 /*
  * Define a structure that contains arguments associated with opening and
  * closing a channel.
@@ -315,20 +302,15 @@ struct xpc_openclose_args {
 #define XPC_OPENCLOSE_ARGS_SIZE \
 	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
 
-
 /* struct xpc_msg flags */
 
 #define	XPC_M_DONE		0x01	/* msg has been received/consumed */
 #define	XPC_M_READY		0x02	/* msg is ready to be sent */
 #define	XPC_M_INTERRUPT		0x04	/* send interrupt when msg consumed */
 
-
 #define XPC_MSG_ADDRESS(_payload) \
 		((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
 
-
 /*
  * Defines notify entry.
  *
@@ -336,19 +318,17 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-	volatile u8 type;	/* type of notification */
+	u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;	/* user's notify function */
 	void *key;		/* pointer to user's key */
 };
 
 /* struct xpc_notify type of notification */
 
 #define	XPC_N_CALL		0x01	/* notify function provided by user */
 
-
 /*
  * Define the structure that manages all the stuff required by a channel. In
  * particular, they are used to manage the messages sent across the channel.
@@ -428,48 +408,48 @@ struct xpc_notify {
  * messages.
  */
 struct xpc_channel {
 	partid_t partid;	/* ID of remote partition connected */
 	spinlock_t lock;	/* lock for updating this structure */
 	u32 flags;		/* general flags */
 
 	enum xpc_retval reason;	/* reason why channel is disconnect'g */
 	int reason_line;	/* line# disconnect initiated from */
 
 	u16 number;		/* channel # */
 
 	u16 msg_size;		/* sizeof each msg entry */
 	u16 local_nentries;	/* #of msg entries in local msg queue */
-	u16 remote_nentries;	/* #of msg entries in remote msg queue*/
+	u16 remote_nentries;	/* #of msg entries in remote msg queue */
 
 	void *local_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *local_msgqueue;	/* local message queue */
 	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
-	struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
-					/* local message queue */
+	struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
+					 /* local message queue */
 	u64 remote_msgqueue_pa;	/* phys addr of remote partition's */
 				/* local message queue */
 
 	atomic_t references;	/* #of external references to queues */
 
 	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
 	wait_queue_head_t msg_allocate_wq;	/* msg allocation wait queue */
 
 	u8 delayed_IPI_flags;	/* IPI flags received, but delayed */
 				/* action until channel disconnected */
 
 	/* queue of msg senders who want to be notified when msg received */
 
 	atomic_t n_to_notify;	/* #of msg senders to notify */
-	struct xpc_notify *notify_queue;/* notify queue for messages sent */
+	struct xpc_notify *notify_queue; /* notify queue for messages sent */
 
 	xpc_channel_func func;	/* user's channel function */
 	void *key;		/* pointer to user's key */
 
 	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
 	struct completion wdisconnect_wait;	/* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					/* opening or closing of channel */
 
 	/* various flavors of local and remote Get/Put values */
@@ -477,56 +457,48 @@ struct xpc_channel {
 	struct xpc_gp remote_GP;	/* remote Get/Put values */
 	struct xpc_gp w_local_GP;	/* working local Get/Put values */
 	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
 	s64 next_msg_to_pull;	/* Put value of next msg to pull */
 
 	/* kthread management related fields */
 
-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
 	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
 	u32 kthreads_assigned_limit;	/* limit on #of kthreads assigned */
 	atomic_t kthreads_idle;	/* #of kthreads idle waiting for work */
 	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
 	atomic_t kthreads_active;	/* #of kthreads actively working */
-// >>> following field is temporary
-	u32 kthreads_created;	/* total #of kthreads created */
 
 	wait_queue_head_t idle_wq;	/* idle kthread wait queue */
 
 } ____cacheline_aligned;
 /* struct xpc_channel flags */
 
 #define	XPC_C_WASCONNECTED	0x00000001 /* channel was connected */
 
 #define	XPC_C_ROPENREPLY	0x00000002 /* remote open channel reply */
 #define	XPC_C_OPENREPLY		0x00000004 /* local open channel reply */
 #define	XPC_C_ROPENREQUEST	0x00000008 /* remote open channel request */
 #define	XPC_C_OPENREQUEST	0x00000010 /* local open channel request */
 
 #define	XPC_C_SETUP		0x00000020 /* channel's msgqueues are alloc'd */
 #define	XPC_C_CONNECTEDCALLOUT	0x00000040 /* connected callout initiated */
 #define	XPC_C_CONNECTEDCALLOUT_MADE \
 				0x00000080 /* connected callout completed */
 #define	XPC_C_CONNECTED		0x00000100 /* local channel is connected */
 #define	XPC_C_CONNECTING	0x00000200 /* channel is being connected */
 
 #define	XPC_C_RCLOSEREPLY	0x00000400 /* remote close channel reply */
 #define	XPC_C_CLOSEREPLY	0x00000800 /* local close channel reply */
 #define	XPC_C_RCLOSEREQUEST	0x00001000 /* remote close channel request */
 #define	XPC_C_CLOSEREQUEST	0x00002000 /* local close channel request */
 
 #define	XPC_C_DISCONNECTED	0x00004000 /* channel is disconnected */
 #define	XPC_C_DISCONNECTING	0x00008000 /* channel is being disconnected */
 #define	XPC_C_DISCONNECTINGCALLOUT \
 				0x00010000 /* disconnecting callout initiated */
 #define	XPC_C_DISCONNECTINGCALLOUT_MADE \
 				0x00020000 /* disconnecting callout completed */
 #define	XPC_C_WDISCONNECT	0x00040000 /* waiting for channel disconnect */
 
 /*
  * Manages channels on a partition basis. There is one of these structures
@@ -537,33 +509,31 @@ struct xpc_partition {
 
 	/* XPC HB infrastructure */
 
 	u8 remote_rp_version;	/* version# of partition's rsvd pg */
-	struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
+	struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
 	u64 remote_rp_pa;	/* phys addr of partition's rsvd pg */
 	u64 remote_vars_pa;	/* phys addr of partition's vars */
 	u64 remote_vars_part_pa;	/* phys addr of partition's vars part */
u64 last_heartbeat; /* HB at last read */ u64 last_heartbeat; /* HB at last read */
u64 remote_amos_page_pa; /* phys addr of partition's amos page */ u64 remote_amos_page_pa; /* phys addr of partition's amos page */
int remote_act_nasid; /* active part's act/deact nasid */ int remote_act_nasid; /* active part's act/deact nasid */
int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
u32 act_IRQ_rcvd; /* IRQs since activation */ u32 act_IRQ_rcvd; /* IRQs since activation */
spinlock_t act_lock; /* protect updating of act_state */ spinlock_t act_lock; /* protect updating of act_state */
u8 act_state; /* from XPC HB viewpoint */ u8 act_state; /* from XPC HB viewpoint */
u8 remote_vars_version; /* version# of partition's vars */ u8 remote_vars_version; /* version# of partition's vars */
enum xpc_retval reason; /* reason partition is deactivating */ enum xpc_retval reason; /* reason partition is deactivating */
int reason_line; /* line# deactivation initiated from */ int reason_line; /* line# deactivation initiated from */
int reactivate_nasid; /* nasid in partition to reactivate */ int reactivate_nasid; /* nasid in partition to reactivate */
unsigned long disengage_request_timeout; /* timeout in jiffies */ unsigned long disengage_request_timeout; /* timeout in jiffies */
struct timer_list disengage_request_timer; struct timer_list disengage_request_timer;
/* XPC infrastructure referencing and teardown control */ /* XPC infrastructure referencing and teardown control */
volatile u8 setup_state; /* infrastructure setup state */ u8 setup_state; /* infrastructure setup state */
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
atomic_t references; /* #of references to infrastructure */ atomic_t references; /* #of references to infrastructure */
/* /*
* NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
...@@ -572,53 +542,48 @@ struct xpc_partition { ...@@ -572,53 +542,48 @@ struct xpc_partition {
* 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
*/ */
u8 nchannels; /* #of defined channels supported */
u8 nchannels; /* #of defined channels supported */ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
atomic_t nchannels_engaged;/* #of channels engaged with remote part */ struct xpc_channel *channels; /* array of channel structures */
struct xpc_channel *channels;/* array of channel structures */
void *local_GPs_base; /* base address of kmalloc'd space */
void *local_GPs_base; /* base address of kmalloc'd space */ struct xpc_gp *local_GPs; /* local Get/Put values */
struct xpc_gp *local_GPs; /* local Get/Put values */ void *remote_GPs_base; /* base address of kmalloc'd space */
void *remote_GPs_base; /* base address of kmalloc'd space */ struct xpc_gp *remote_GPs; /* copy of remote partition's local */
struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ /* Get/Put values */
/* values */ u64 remote_GPs_pa; /* phys address of remote partition's local */
u64 remote_GPs_pa; /* phys address of remote partition's local */ /* Get/Put values */
/* Get/Put values */
/* fields used to pass args when opening or closing a channel */ /* fields used to pass args when opening or closing a channel */
void *local_openclose_args_base; /* base address of kmalloc'd space */ void *local_openclose_args_base; /* base address of kmalloc'd space */
struct xpc_openclose_args *local_openclose_args; /* local's args */ struct xpc_openclose_args *local_openclose_args; /* local's args */
void *remote_openclose_args_base; /* base address of kmalloc'd space */ void *remote_openclose_args_base; /* base address of kmalloc'd space */
struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
/* args */ /* args */
u64 remote_openclose_args_pa; /* phys addr of remote's args */ u64 remote_openclose_args_pa; /* phys addr of remote's args */
/* IPI sending, receiving and handling related fields */ /* IPI sending, receiving and handling related fields */
int remote_IPI_nasid; /* nasid of where to send IPIs */ int remote_IPI_nasid; /* nasid of where to send IPIs */
int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
u64 local_IPI_amo; /* IPI amo flags yet to be handled */
char IPI_owner[8]; /* IPI owner's name */
struct timer_list dropped_IPI_timer; /* dropped IPI timer */
spinlock_t IPI_lock; /* IPI handler lock */ AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
u64 local_IPI_amo; /* IPI amo flags yet to be handled */
char IPI_owner[8]; /* IPI owner's name */
struct timer_list dropped_IPI_timer; /* dropped IPI timer */
spinlock_t IPI_lock; /* IPI handler lock */
/* channel manager related fields */ /* channel manager related fields */
atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
} ____cacheline_aligned; } ____cacheline_aligned;
/* struct xpc_partition act_state values (for XPC HB) */ /* struct xpc_partition act_state values (for XPC HB) */
#define XPC_P_INACTIVE 0x00 /* partition is not active */ #define XPC_P_INACTIVE 0x00 /* partition is not active */
...@@ -627,11 +592,9 @@ struct xpc_partition { ...@@ -627,11 +592,9 @@ struct xpc_partition {
#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ #define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ #define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ #define XPC_DEACTIVATE_PARTITION(_p, _reason) \
xpc_deactivate_partition(__LINE__, (_p), (_reason)) xpc_deactivate_partition(__LINE__, (_p), (_reason))
/* struct xpc_partition setup_state values */ /* struct xpc_partition setup_state values */
#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ #define XPC_P_UNSET 0x00 /* infrastructure was never setup */
...@@ -639,8 +602,6 @@ struct xpc_partition { ...@@ -639,8 +602,6 @@ struct xpc_partition {
#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ #define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ #define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
/* /*
* struct xpc_partition IPI_timer #of seconds to wait before checking for * struct xpc_partition IPI_timer #of seconds to wait before checking for
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
...@@ -648,22 +609,17 @@ struct xpc_partition { ...@@ -648,22 +609,17 @@ struct xpc_partition {
*/ */
#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
/* number of seconds to wait for other partitions to disengage */ /* number of seconds to wait for other partitions to disengage */
#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
/* interval in seconds to print 'waiting disengagement' messages */ /* interval in seconds to print 'waiting disengagement' messages */
#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 #define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) #define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
/* found in xp_main.c */ /* found in xp_main.c */
extern struct xpc_registration xpc_registrations[]; extern struct xpc_registration xpc_registrations[];
/* found in xpc_main.c */ /* found in xpc_main.c */
extern struct device *xpc_part; extern struct device *xpc_part;
extern struct device *xpc_chan; extern struct device *xpc_chan;
...@@ -676,7 +632,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int); ...@@ -676,7 +632,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int); extern void xpc_disconnect_wait(int);
/* found in xpc_partition.c */ /* found in xpc_partition.c */
extern int xpc_exiting; extern int xpc_exiting;
extern struct xpc_vars *xpc_vars; extern struct xpc_vars *xpc_vars;
...@@ -696,10 +651,9 @@ extern void xpc_mark_partition_inactive(struct xpc_partition *); ...@@ -696,10 +651,9 @@ extern void xpc_mark_partition_inactive(struct xpc_partition *);
extern void xpc_discovery(void); extern void xpc_discovery(void);
extern void xpc_check_remote_hb(void); extern void xpc_check_remote_hb(void);
extern void xpc_deactivate_partition(const int, struct xpc_partition *, extern void xpc_deactivate_partition(const int, struct xpc_partition *,
enum xpc_retval); enum xpc_retval);
extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
/* found in xpc_channel.c */ /* found in xpc_channel.c */
extern void xpc_initiate_connect(int); extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int); extern void xpc_initiate_disconnect(int);
...@@ -714,23 +668,18 @@ extern void xpc_process_channel_activity(struct xpc_partition *); ...@@ -714,23 +668,18 @@ extern void xpc_process_channel_activity(struct xpc_partition *);
extern void xpc_connected_callout(struct xpc_channel *); extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *); extern void xpc_deliver_msg(struct xpc_channel *);
extern void xpc_disconnect_channel(const int, struct xpc_channel *, extern void xpc_disconnect_channel(const int, struct xpc_channel *,
enum xpc_retval, unsigned long *); enum xpc_retval, unsigned long *);
extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *); extern void xpc_teardown_infrastructure(struct xpc_partition *);
static inline void static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part) xpc_wakeup_channel_mgr(struct xpc_partition *part)
{ {
if (atomic_inc_return(&part->channel_mgr_requests) == 1) { if (atomic_inc_return(&part->channel_mgr_requests) == 1)
wake_up(&part->channel_mgr_wq); wake_up(&part->channel_mgr_wq);
}
} }
/* /*
* These next two inlines are used to keep us from tearing down a channel's * These next two inlines are used to keep us from tearing down a channel's
* msg queues while a thread may be referencing them. * msg queues while a thread may be referencing them.
...@@ -747,17 +696,13 @@ xpc_msgqueue_deref(struct xpc_channel *ch) ...@@ -747,17 +696,13 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
s32 refs = atomic_dec_return(&ch->references); s32 refs = atomic_dec_return(&ch->references);
DBUG_ON(refs < 0); DBUG_ON(refs < 0);
if (refs == 0) { if (refs == 0)
xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
}
} }
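The pattern these two inlines enforce is worth spelling out. Below is a minimal sketch of a caller, assuming the elided xpc_msgqueue_ref() counterpart simply increments ch->references; the function name and body are illustrative, not the driver's exact code:

/*
 * Hold a reference across any access to the channel's message queues;
 * teardown waits for the references to drain before freeing them.
 */
static void example_touch_msg_queues(struct xpc_channel *ch)
{
	xpc_msgqueue_ref(ch);		/* elided counterpart to _deref */

	/* ... safe to use ch->local_msgqueue and friends here ... */

	xpc_msgqueue_deref(ch);		/* last ref wakes the channel mgr */
}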
#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
/* /*
* These two inlines are used to keep us from tearing down a partition's * These two inlines are used to keep us from tearing down a partition's
* setup infrastructure while a thread may be referencing it. * setup infrastructure while a thread may be referencing it.
...@@ -767,11 +712,9 @@ xpc_part_deref(struct xpc_partition *part) ...@@ -767,11 +712,9 @@ xpc_part_deref(struct xpc_partition *part)
{ {
s32 refs = atomic_dec_return(&part->references); s32 refs = atomic_dec_return(&part->references);
DBUG_ON(refs < 0); DBUG_ON(refs < 0);
if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
wake_up(&part->teardown_wq); wake_up(&part->teardown_wq);
}
} }
static inline int static inline int
...@@ -779,17 +722,14 @@ xpc_part_ref(struct xpc_partition *part) ...@@ -779,17 +722,14 @@ xpc_part_ref(struct xpc_partition *part)
{ {
int setup; int setup;
atomic_inc(&part->references); atomic_inc(&part->references);
setup = (part->setup_state == XPC_P_SETUP); setup = (part->setup_state == XPC_P_SETUP);
if (!setup) { if (!setup)
xpc_part_deref(part); xpc_part_deref(part);
}
return setup; return setup;
} }
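Note the acquire-or-bail shape of xpc_part_ref(): it takes the reference first and only then checks setup_state, dropping the reference again on failure, so there is no window in which teardown can complete between the check and the increment. A hedged usage sketch (the function name is hypothetical):

/*
 * Guard any use of the partition's infrastructure with a reference;
 * bail out if setup has not completed or teardown has begun.
 */
static void example_use_partition(struct xpc_partition *part)
{
	if (!xpc_part_ref(part))
		return;

	/* ... safe to touch part->channels, part->local_GPs, ... */

	xpc_part_deref(part);	/* may wake a thread waiting to teardown */
}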
/* /*
* The following macro is to be used for the setting of the reason and * The following macro is to be used for the setting of the reason and
* reason_line fields in both the struct xpc_channel and struct xpc_partition * reason_line fields in both the struct xpc_channel and struct xpc_partition
...@@ -801,8 +741,6 @@ xpc_part_ref(struct xpc_partition *part) ...@@ -801,8 +741,6 @@ xpc_part_ref(struct xpc_partition *part)
(_p)->reason_line = _line; \ (_p)->reason_line = _line; \
} }
/* /*
* This next set of inlines are used to keep track of when a partition is * This next set of inlines are used to keep track of when a partition is
* potentially engaged in accessing memory belonging to another partition. * potentially engaged in accessing memory belonging to another partition.
...@@ -812,23 +750,24 @@ static inline void ...@@ -812,23 +750,24 @@ static inline void
xpc_mark_partition_engaged(struct xpc_partition *part) xpc_mark_partition_engaged(struct xpc_partition *part)
{ {
unsigned long irq_flags; unsigned long irq_flags;
AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); (XPC_ENGAGED_PARTITIONS_AMO *
sizeof(AMO_t)));
local_irq_save(irq_flags); local_irq_save(irq_flags);
/* set bit corresponding to our partid in remote partition's AMO */ /* set bit corresponding to our partid in remote partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
(1UL << sn_partition_id)); (1UL << sn_partition_id));
/* /*
* We must always use the nofault function regardless of whether we * We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
* didn't, we'd never know that the other partition is down and would * didn't, we'd never know that the other partition is down and would
* keep sending IPIs and AMOs to it until the heartbeat times out. * keep sending IPIs and AMOs to it until the heartbeat times out.
*/ */
(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target)); variable),
xp_nofault_PIOR_target));
local_irq_restore(irq_flags); local_irq_restore(irq_flags);
} }
...@@ -837,23 +776,24 @@ static inline void ...@@ -837,23 +776,24 @@ static inline void
xpc_mark_partition_disengaged(struct xpc_partition *part) xpc_mark_partition_disengaged(struct xpc_partition *part)
{ {
unsigned long irq_flags; unsigned long irq_flags;
AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); (XPC_ENGAGED_PARTITIONS_AMO *
sizeof(AMO_t)));
local_irq_save(irq_flags); local_irq_save(irq_flags);
/* clear bit corresponding to our partid in remote partition's AMO */ /* clear bit corresponding to our partid in remote partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
~(1UL << sn_partition_id)); ~(1UL << sn_partition_id));
/* /*
* We must always use the nofault function regardless of whether we * We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
* didn't, we'd never know that the other partition is down and would * didn't, we'd never know that the other partition is down and would
* keep sending IPIs and AMOs to it until the heartbeat times out. * keep sending IPIs and AMOs to it until the heartbeat times out.
*/ */
(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target)); variable),
xp_nofault_PIOR_target));
local_irq_restore(irq_flags); local_irq_restore(irq_flags);
} }
...@@ -862,23 +802,23 @@ static inline void ...@@ -862,23 +802,23 @@ static inline void
xpc_request_partition_disengage(struct xpc_partition *part) xpc_request_partition_disengage(struct xpc_partition *part)
{ {
unsigned long irq_flags; unsigned long irq_flags;
AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
local_irq_save(irq_flags); local_irq_save(irq_flags);
/* set bit corresponding to our partid in remote partition's AMO */ /* set bit corresponding to our partid in remote partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
(1UL << sn_partition_id)); (1UL << sn_partition_id));
/* /*
* We must always use the nofault function regardless of whether we * We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
* didn't, we'd never know that the other partition is down and would * didn't, we'd never know that the other partition is down and would
* keep sending IPIs and AMOs to it until the heartbeat times out. * keep sending IPIs and AMOs to it until the heartbeat times out.
*/ */
(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target)); variable),
xp_nofault_PIOR_target));
local_irq_restore(irq_flags); local_irq_restore(irq_flags);
} }
...@@ -887,23 +827,23 @@ static inline void ...@@ -887,23 +827,23 @@ static inline void
xpc_cancel_partition_disengage_request(struct xpc_partition *part) xpc_cancel_partition_disengage_request(struct xpc_partition *part)
{ {
unsigned long irq_flags; unsigned long irq_flags;
AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
local_irq_save(irq_flags); local_irq_save(irq_flags);
/* clear bit corresponding to our partid in remote partition's AMO */ /* clear bit corresponding to our partid in remote partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
~(1UL << sn_partition_id)); ~(1UL << sn_partition_id));
/* /*
* We must always use the nofault function regardless of whether we * We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
* didn't, we'd never know that the other partition is down and would * didn't, we'd never know that the other partition is down and would
* keep sending IPIs and AMOs to it until the heartbeat times out. * keep sending IPIs and AMOs to it until the heartbeat times out.
*/ */
(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target)); variable),
xp_nofault_PIOR_target));
local_irq_restore(irq_flags); local_irq_restore(irq_flags);
} }
...@@ -913,10 +853,9 @@ xpc_partition_engaged(u64 partid_mask) ...@@ -913,10 +853,9 @@ xpc_partition_engaged(u64 partid_mask)
{ {
AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
/* return our partition's AMO variable ANDed with partid_mask */ /* return our partition's AMO variable ANDed with partid_mask */
return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
partid_mask); partid_mask);
} }
static inline u64 static inline u64
...@@ -924,10 +863,9 @@ xpc_partition_disengage_requested(u64 partid_mask) ...@@ -924,10 +863,9 @@ xpc_partition_disengage_requested(u64 partid_mask)
{ {
AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
/* return our partition's AMO variable ANDed with partid_mask */ /* return our partition's AMO variable ANDed with partid_mask */
return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
partid_mask); partid_mask);
} }
static inline void static inline void
...@@ -935,10 +873,9 @@ xpc_clear_partition_engaged(u64 partid_mask) ...@@ -935,10 +873,9 @@ xpc_clear_partition_engaged(u64 partid_mask)
{ {
AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
/* clear bit(s) based on partid_mask in our partition's AMO */ /* clear bit(s) based on partid_mask in our partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
~partid_mask); ~partid_mask);
} }
static inline void static inline void
...@@ -946,14 +883,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask) ...@@ -946,14 +883,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
{ {
AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
/* clear bit(s) based on partid_mask in our partition's AMO */ /* clear bit(s) based on partid_mask in our partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
~partid_mask); ~partid_mask);
} }
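Taken together, these helpers treat each 64-bit AMO as a bitmap indexed by partition ID: a remote partition sets its own bit in our engaged-partitions AMO, and clears it again when it lets go. A rough sketch of how the pieces compose, without the timeout handling the driver layers on top (the function name is illustrative):

/*
 * Ask the remote partition to disengage, then poll our own
 * engaged-partitions AMO until its bit drops out.
 */
static void example_wait_disengaged(struct xpc_partition *part)
{
	u64 mask = 1UL << XPC_PARTID(part);

	xpc_request_partition_disengage(part);

	while (xpc_partition_engaged(mask))
		cpu_relax();	/* remote clears its bit when it lets go */
}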
/* /*
* The following set of macros and inlines are used for the sending and * The following set of macros and inlines are used for the sending and
* receiving of IPIs (also known as IRQs). There are two flavors of IPIs, * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
...@@ -964,20 +898,18 @@ xpc_clear_partition_disengage_request(u64 partid_mask) ...@@ -964,20 +898,18 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
static inline u64 static inline u64
xpc_IPI_receive(AMO_t *amo) xpc_IPI_receive(AMO_t *amo)
{ {
return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
} }
static inline enum xpc_retval static inline enum xpc_retval
xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
{ {
int ret = 0; int ret = 0;
unsigned long irq_flags; unsigned long irq_flags;
local_irq_save(irq_flags); local_irq_save(irq_flags);
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
/* /*
...@@ -986,15 +918,14 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) ...@@ -986,15 +918,14 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
* didn't, we'd never know that the other partition is down and would * didn't, we'd never know that the other partition is down and would
* keep sending IPIs and AMOs to it until the heartbeat times out. * keep sending IPIs and AMOs to it until the heartbeat times out.
*/ */
ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
xp_nofault_PIOR_target)); xp_nofault_PIOR_target));
local_irq_restore(irq_flags); local_irq_restore(irq_flags);
return ((ret == 0) ? xpcSuccess : xpcPioReadError); return ((ret == 0) ? xpcSuccess : xpcPioReadError);
} }
/* /*
* IPIs associated with SGI_XPC_ACTIVATE IRQ. * IPIs associated with SGI_XPC_ACTIVATE IRQ.
*/ */
...@@ -1004,47 +935,47 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) ...@@ -1004,47 +935,47 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
*/ */
static inline void static inline void
xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
int to_phys_cpuid) int to_phys_cpuid)
{ {
int w_index = XPC_NASID_W_INDEX(from_nasid); int w_index = XPC_NASID_W_INDEX(from_nasid);
int b_index = XPC_NASID_B_INDEX(from_nasid); int b_index = XPC_NASID_B_INDEX(from_nasid);
AMO_t *amos = (AMO_t *) __va(amos_page_pa + AMO_t *amos = (AMO_t *)__va(amos_page_pa +
(XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
(void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, to_phys_cpuid, SGI_XPC_ACTIVATE);
to_phys_cpuid, SGI_XPC_ACTIVATE);
} }
static inline void static inline void
xpc_IPI_send_activate(struct xpc_vars *vars) xpc_IPI_send_activate(struct xpc_vars *vars)
{ {
xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
vars->act_nasid, vars->act_phys_cpuid); vars->act_nasid, vars->act_phys_cpuid);
} }
static inline void static inline void
xpc_IPI_send_activated(struct xpc_partition *part) xpc_IPI_send_activated(struct xpc_partition *part)
{ {
xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
part->remote_act_nasid, part->remote_act_phys_cpuid); part->remote_act_nasid,
part->remote_act_phys_cpuid);
} }
static inline void static inline void
xpc_IPI_send_reactivate(struct xpc_partition *part) xpc_IPI_send_reactivate(struct xpc_partition *part)
{ {
xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
} }
static inline void static inline void
xpc_IPI_send_disengage(struct xpc_partition *part) xpc_IPI_send_disengage(struct xpc_partition *part)
{ {
xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
part->remote_act_nasid, part->remote_act_phys_cpuid); part->remote_act_nasid,
part->remote_act_phys_cpuid);
} }
/* /*
* IPIs associated with SGI_XPC_NOTIFY IRQ. * IPIs associated with SGI_XPC_NOTIFY IRQ.
*/ */
...@@ -1058,33 +989,28 @@ xpc_IPI_send_disengage(struct xpc_partition *part) ...@@ -1058,33 +989,28 @@ xpc_IPI_send_disengage(struct xpc_partition *part)
static inline void static inline void
xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
unsigned long *irq_flags) unsigned long *irq_flags)
{ {
struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_partition *part = &xpc_partitions[ch->partid];
enum xpc_retval ret; enum xpc_retval ret;
if (likely(part->act_state != XPC_P_DEACTIVATING)) { if (likely(part->act_state != XPC_P_DEACTIVATING)) {
ret = xpc_IPI_send(part->remote_IPI_amo_va, ret = xpc_IPI_send(part->remote_IPI_amo_va,
(u64) ipi_flag << (ch->number * 8), (u64)ipi_flag << (ch->number * 8),
part->remote_IPI_nasid, part->remote_IPI_nasid,
part->remote_IPI_phys_cpuid, part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
SGI_XPC_NOTIFY);
dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
ipi_flag_string, ch->partid, ch->number, ret); ipi_flag_string, ch->partid, ch->number, ret);
if (unlikely(ret != xpcSuccess)) { if (unlikely(ret != xpcSuccess)) {
if (irq_flags != NULL) { if (irq_flags != NULL)
spin_unlock_irqrestore(&ch->lock, *irq_flags); spin_unlock_irqrestore(&ch->lock, *irq_flags);
}
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
if (irq_flags != NULL) { if (irq_flags != NULL)
spin_lock_irqsave(&ch->lock, *irq_flags); spin_lock_irqsave(&ch->lock, *irq_flags);
}
} }
} }
} }
/* /*
* Make it look like the remote partition, which is associated with the * Make it look like the remote partition, which is associated with the
* specified channel, sent us an IPI. This faked IPI will be handled * specified channel, sent us an IPI. This faked IPI will be handled
...@@ -1095,18 +1021,16 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, ...@@ -1095,18 +1021,16 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
static inline void static inline void
xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
char *ipi_flag_string) char *ipi_flag_string)
{ {
struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_partition *part = &xpc_partitions[ch->partid];
FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
ipi_flag_string, ch->partid, ch->number); ipi_flag_string, ch->partid, ch->number);
} }
/* /*
* The sending and receiving of IPIs includes the setting of an AMO variable * The sending and receiving of IPIs includes the setting of an AMO variable
* to indicate the reason the IPI was sent. The 64-bit variable is divided * to indicate the reason the IPI was sent. The 64-bit variable is divided
...@@ -1121,21 +1045,18 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, ...@@ -1121,21 +1045,18 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
#define XPC_IPI_OPENREPLY 0x08 #define XPC_IPI_OPENREPLY 0x08
#define XPC_IPI_MSGREQUEST 0x10 #define XPC_IPI_MSGREQUEST 0x10
/* given an AMO variable and a channel#, get its associated IPI flags */ /* given an AMO variable and a channel#, get its associated IPI flags */
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
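A worked example makes the byte-per-channel layout concrete: channel N owns byte N of the 64-bit AMO value, so its flags live at bit offset N * 8. The function below is illustrative only:

static void example_ipi_flag_packing(void)
{
	u64 amo = 0;

	/* raise MSGREQUEST (0x10) for channel 2: sets bits 16..23 */
	XPC_SET_IPI_FLAGS(amo, 2, XPC_IPI_MSGREQUEST);
	/* amo is now 0x0000000000100000 */

	BUG_ON(XPC_GET_IPI_FLAGS(amo, 2) != XPC_IPI_MSGREQUEST);
	BUG_ON(!XPC_ANY_MSG_IPI_FLAGS_SET(amo));
	BUG_ON(XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo));
}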
static inline void static inline void
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
{ {
struct xpc_openclose_args *args = ch->local_openclose_args; struct xpc_openclose_args *args = ch->local_openclose_args;
args->reason = ch->reason; args->reason = ch->reason;
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
...@@ -1152,7 +1073,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -1152,7 +1073,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
{ {
struct xpc_openclose_args *args = ch->local_openclose_args; struct xpc_openclose_args *args = ch->local_openclose_args;
args->msg_size = ch->msg_size; args->msg_size = ch->msg_size;
args->local_nentries = ch->local_nentries; args->local_nentries = ch->local_nentries;
...@@ -1164,7 +1084,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -1164,7 +1084,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
{ {
struct xpc_openclose_args *args = ch->local_openclose_args; struct xpc_openclose_args *args = ch->local_openclose_args;
args->remote_nentries = ch->remote_nentries; args->remote_nentries = ch->remote_nentries;
args->local_nentries = ch->local_nentries; args->local_nentries = ch->local_nentries;
args->local_msgqueue_pa = __pa(ch->local_msgqueue); args->local_msgqueue_pa = __pa(ch->local_msgqueue);
...@@ -1184,7 +1103,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) ...@@ -1184,7 +1103,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
} }
/* /*
* Memory for XPC's AMO variables is allocated by the MSPEC driver. These * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
* pages are located in the lowest granule. The lowest granule uses 4k pages * pages are located in the lowest granule. The lowest granule uses 4k pages
...@@ -1201,13 +1119,10 @@ xpc_IPI_init(int index) ...@@ -1201,13 +1119,10 @@ xpc_IPI_init(int index)
{ {
AMO_t *amo = xpc_vars->amos_page + index; AMO_t *amo = xpc_vars->amos_page + index;
(void)xpc_IPI_receive(amo); /* clear AMO variable */
(void) xpc_IPI_receive(amo); /* clear AMO variable */
return amo; return amo;
} }
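Since xpc_IPI_receive() fetches and clears atomically, xpc_IPI_init() doubles as both "get the AMO's address" and "discard stale state". A hedged sketch of the startup-time use, which this diff does not show (the function name is hypothetical):

static void example_prime_amos(void)
{
	/* returns each AMO's address with any stale bits cleared */
	AMO_t *engaged = xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	AMO_t *disengage = xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* both now reference zeroed AMOs in the lowest granule */
	(void)engaged;
	(void)disengage;
}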
static inline enum xpc_retval static inline enum xpc_retval
xpc_map_bte_errors(bte_result_t error) xpc_map_bte_errors(bte_result_t error)
{ {
...@@ -1220,22 +1135,31 @@ xpc_map_bte_errors(bte_result_t error) ...@@ -1220,22 +1135,31 @@ xpc_map_bte_errors(bte_result_t error)
return xpcBteUnmappedError; return xpcBteUnmappedError;
} }
switch (error) { switch (error) {
case BTE_SUCCESS: return xpcSuccess; case BTE_SUCCESS:
case BTEFAIL_DIR: return xpcBteDirectoryError; return xpcSuccess;
case BTEFAIL_POISON: return xpcBtePoisonError; case BTEFAIL_DIR:
case BTEFAIL_WERR: return xpcBteWriteError; return xpcBteDirectoryError;
case BTEFAIL_ACCESS: return xpcBteAccessError; case BTEFAIL_POISON:
case BTEFAIL_PWERR: return xpcBtePWriteError; return xpcBtePoisonError;
case BTEFAIL_PRERR: return xpcBtePReadError; case BTEFAIL_WERR:
case BTEFAIL_TOUT: return xpcBteTimeOutError; return xpcBteWriteError;
case BTEFAIL_XTERR: return xpcBteXtalkError; case BTEFAIL_ACCESS:
case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; return xpcBteAccessError;
default: return xpcBteUnmappedError; case BTEFAIL_PWERR:
return xpcBtePWriteError;
case BTEFAIL_PRERR:
return xpcBtePReadError;
case BTEFAIL_TOUT:
return xpcBteTimeOutError;
case BTEFAIL_XTERR:
return xpcBteXtalkError;
case BTEFAIL_NOTAVAIL:
return xpcBteNotAvailable;
default:
return xpcBteUnmappedError;
} }
} }
/* /*
* Check to see if there is any channel activity to/from the specified * Check to see if there is any channel activity to/from the specified
* partition. * partition.
...@@ -1246,11 +1170,9 @@ xpc_check_for_channel_activity(struct xpc_partition *part) ...@@ -1246,11 +1170,9 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
u64 IPI_amo; u64 IPI_amo;
unsigned long irq_flags; unsigned long irq_flags;
IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
if (IPI_amo == 0) { if (IPI_amo == 0)
return; return;
}
spin_lock_irqsave(&part->IPI_lock, irq_flags); spin_lock_irqsave(&part->IPI_lock, irq_flags);
part->local_IPI_amo |= IPI_amo; part->local_IPI_amo |= IPI_amo;
...@@ -1262,6 +1184,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part) ...@@ -1262,6 +1184,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
xpc_wakeup_channel_mgr(part); xpc_wakeup_channel_mgr(part);
} }
#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
#endif /* _ASM_IA64_SN_XPC_H */
...@@ -3,10 +3,9 @@ ...@@ -3,10 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/ */
/* /*
* Cross Partition Communication (XPC) channel support. * Cross Partition Communication (XPC) channel support.
* *
...@@ -15,7 +14,6 @@ ...@@ -15,7 +14,6 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -25,8 +23,7 @@ ...@@ -25,8 +23,7 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <asm/sn/bte.h> #include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h> #include <asm/sn/sn_sal.h>
#include <asm/sn/xpc.h> #include "xpc.h"
/* /*
* Guarantee that the kzalloc'd memory is cacheline aligned. * Guarantee that the kzalloc'd memory is cacheline aligned.
...@@ -36,22 +33,21 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) ...@@ -36,22 +33,21 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{ {
/* see if kzalloc will give us cacheline aligned memory by default */ /* see if kzalloc will give us cacheline aligned memory by default */
*base = kzalloc(size, flags); *base = kzalloc(size, flags);
if (*base == NULL) { if (*base == NULL)
return NULL; return NULL;
}
if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base; return *base;
}
kfree(*base); kfree(*base);
/* nope, we'll have to do it ourselves */ /* nope, we'll have to do it ourselves */
*base = kzalloc(size + L1_CACHE_BYTES, flags); *base = kzalloc(size + L1_CACHE_BYTES, flags);
if (*base == NULL) { if (*base == NULL)
return NULL; return NULL;
}
return (void *) L1_CACHE_ALIGN((u64) *base);
}
return (void *)L1_CACHE_ALIGN((u64)*base);
}
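The void **base out-parameter is the piece that is easy to misuse: the returned pointer may land in the middle of the oversized allocation, so it is *base, not the aligned pointer, that must eventually be passed to kfree(). A minimal sketch of the expected pairing, mirroring the local_GPs/local_GPs_base usage below (the function name is hypothetical):

static struct xpc_gp *example_alloc_gps(void **base)
{
	/* use the returned aligned pointer; free via *base later */
	return xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, base);
}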
/* /*
* Set up the initial values for the XPartition Communication channels. * Set up the initial values for the XPartition Communication channels.
...@@ -62,7 +58,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid) ...@@ -62,7 +58,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
int ch_number; int ch_number;
struct xpc_channel *ch; struct xpc_channel *ch;
for (ch_number = 0; ch_number < part->nchannels; ch_number++) { for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number]; ch = &part->channels[ch_number];
...@@ -72,7 +67,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid) ...@@ -72,7 +67,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
ch->local_GP = &part->local_GPs[ch_number]; ch->local_GP = &part->local_GPs[ch_number];
ch->local_openclose_args = ch->local_openclose_args =
&part->local_openclose_args[ch_number]; &part->local_openclose_args[ch_number];
atomic_set(&ch->kthreads_assigned, 0); atomic_set(&ch->kthreads_assigned, 0);
atomic_set(&ch->kthreads_idle, 0); atomic_set(&ch->kthreads_idle, 0);
...@@ -91,7 +86,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid) ...@@ -91,7 +86,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
} }
} }
/* /*
* Setup the infrastructure necessary to support XPartition Communication * Setup the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one. * between the specified remote partition and the local one.
...@@ -103,7 +97,6 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -103,7 +97,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
struct timer_list *timer; struct timer_list *timer;
partid_t partid = XPC_PARTID(part); partid_t partid = XPC_PARTID(part);
/* /*
* Zero out MOST of the entry for this partition. Only the fields * Zero out MOST of the entry for this partition. Only the fields
* starting with `nchannels' will be zeroed. The preceding fields must * starting with `nchannels' will be zeroed. The preceding fields must
...@@ -111,14 +104,14 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -111,14 +104,14 @@ xpc_setup_infrastructure(struct xpc_partition *part)
* referenced during this memset() operation. * referenced during this memset() operation.
*/ */
memset(&part->nchannels, 0, sizeof(struct xpc_partition) - memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
offsetof(struct xpc_partition, nchannels)); offsetof(struct xpc_partition, nchannels));
/* /*
* Allocate all of the channel structures as a contiguous chunk of * Allocate all of the channel structures as a contiguous chunk of
* memory. * memory.
*/ */
part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
GFP_KERNEL); GFP_KERNEL);
if (part->channels == NULL) { if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n"); dev_err(xpc_chan, "can't get memory for channels\n");
return xpcNoMemory; return xpcNoMemory;
...@@ -126,11 +119,11 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -126,11 +119,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
part->nchannels = XPC_NCHANNELS; part->nchannels = XPC_NCHANNELS;
/* allocate all the required GET/PUT values */ /* allocate all the required GET/PUT values */
part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
GFP_KERNEL, &part->local_GPs_base); GFP_KERNEL,
&part->local_GPs_base);
if (part->local_GPs == NULL) { if (part->local_GPs == NULL) {
kfree(part->channels); kfree(part->channels);
part->channels = NULL; part->channels = NULL;
...@@ -140,7 +133,9 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -140,7 +133,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
} }
part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
GFP_KERNEL, &part->remote_GPs_base); GFP_KERNEL,
&part->
remote_GPs_base);
if (part->remote_GPs == NULL) { if (part->remote_GPs == NULL) {
dev_err(xpc_chan, "can't get memory for remote get/put " dev_err(xpc_chan, "can't get memory for remote get/put "
"values\n"); "values\n");
...@@ -151,12 +146,11 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -151,12 +146,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
return xpcNoMemory; return xpcNoMemory;
} }
/* allocate all the required open and close args */ /* allocate all the required open and close args */
part->local_openclose_args = xpc_kzalloc_cacheline_aligned( part->local_openclose_args =
XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
&part->local_openclose_args_base); &part->local_openclose_args_base);
if (part->local_openclose_args == NULL) { if (part->local_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for local connect args\n"); dev_err(xpc_chan, "can't get memory for local connect args\n");
kfree(part->remote_GPs_base); kfree(part->remote_GPs_base);
...@@ -168,9 +162,9 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -168,9 +162,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
return xpcNoMemory; return xpcNoMemory;
} }
part->remote_openclose_args = xpc_kzalloc_cacheline_aligned( part->remote_openclose_args =
XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
&part->remote_openclose_args_base); &part->remote_openclose_args_base);
if (part->remote_openclose_args == NULL) { if (part->remote_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for remote connect args\n"); dev_err(xpc_chan, "can't get memory for remote connect args\n");
kfree(part->local_openclose_args_base); kfree(part->local_openclose_args_base);
...@@ -184,13 +178,11 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -184,13 +178,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
return xpcNoMemory; return xpcNoMemory;
} }
xpc_initialize_channels(part, partid); xpc_initialize_channels(part, partid);
atomic_set(&part->nchannels_active, 0); atomic_set(&part->nchannels_active, 0);
atomic_set(&part->nchannels_engaged, 0); atomic_set(&part->nchannels_engaged, 0);
/* local_IPI_amo was set to 0 by an earlier memset() */ /* local_IPI_amo was set to 0 by an earlier memset() */
/* Initialize this partition's AMO_t structure */ /* Initialize this partition's AMO_t structure */
...@@ -203,7 +195,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -203,7 +195,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
sprintf(part->IPI_owner, "xpc%02d", partid); sprintf(part->IPI_owner, "xpc%02d", partid);
ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
part->IPI_owner, (void *) (u64) partid); part->IPI_owner, (void *)(u64)partid);
if (ret != 0) { if (ret != 0) {
dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
"errno=%d\n", -ret); "errno=%d\n", -ret);
...@@ -223,8 +215,8 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -223,8 +215,8 @@ xpc_setup_infrastructure(struct xpc_partition *part)
/* Setup a timer to check for dropped IPIs */ /* Setup a timer to check for dropped IPIs */
timer = &part->dropped_IPI_timer; timer = &part->dropped_IPI_timer;
init_timer(timer); init_timer(timer);
timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
timer->data = (unsigned long) part; timer->data = (unsigned long)part;
timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
add_timer(timer); add_timer(timer);
...@@ -234,7 +226,6 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -234,7 +226,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
*/ */
part->setup_state = XPC_P_SETUP; part->setup_state = XPC_P_SETUP;
/* /*
* Setup the per partition specific variables required by the * Setup the per partition specific variables required by the
* remote partition to establish channel connections with us. * remote partition to establish channel connections with us.
...@@ -244,7 +235,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -244,7 +235,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
*/ */
xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
xpc_vars_part[partid].openclose_args_pa = xpc_vars_part[partid].openclose_args_pa =
__pa(part->local_openclose_args); __pa(part->local_openclose_args);
xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
...@@ -255,7 +246,6 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -255,7 +246,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
return xpcSuccess; return xpcSuccess;
} }
/* /*
* Create a wrapper that hides the underlying mechanism for pulling a cacheline * Create a wrapper that hides the underlying mechanism for pulling a cacheline
* (or multiple cachelines) from a remote partition. * (or multiple cachelines) from a remote partition.
...@@ -266,24 +256,21 @@ xpc_setup_infrastructure(struct xpc_partition *part) ...@@ -266,24 +256,21 @@ xpc_setup_infrastructure(struct xpc_partition *part)
*/ */
static enum xpc_retval static enum xpc_retval
xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
const void *src, size_t cnt) const void *src, size_t cnt)
{ {
bte_result_t bte_ret; bte_result_t bte_ret;
DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
if (part->act_state == XPC_P_DEACTIVATING) { if (part->act_state == XPC_P_DEACTIVATING)
return part->reason; return part->reason;
}
bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt, bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
(BTE_NORMAL | BTE_WACQUIRE), NULL); (BTE_NORMAL | BTE_WACQUIRE), NULL);
if (bte_ret == BTE_SUCCESS) { if (bte_ret == BTE_SUCCESS)
return xpcSuccess; return xpcSuccess;
}
dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
XPC_PARTID(part), bte_ret); XPC_PARTID(part), bte_ret);
...@@ -291,7 +278,6 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, ...@@ -291,7 +278,6 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
return xpc_map_bte_errors(bte_ret); return xpc_map_bte_errors(bte_ret);
} }
/* /*
* Pull the remote per partition specific variables from the specified * Pull the remote per partition specific variables from the specified
* partition. * partition.
...@@ -301,41 +287,40 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) ...@@ -301,41 +287,40 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
{ {
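	/*
	 * buffer is twice L1_CACHE_BYTES so that one fully aligned
	 * cacheline is guaranteed to fit inside it wherever the stack
	 * happens to place the array; pulled_entry_cacheline then points
	 * at that aligned cacheline within the buffer.
	 */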
u8 buffer[L1_CACHE_BYTES * 2]; u8 buffer[L1_CACHE_BYTES * 2];
struct xpc_vars_part *pulled_entry_cacheline = struct xpc_vars_part *pulled_entry_cacheline =
(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
struct xpc_vars_part *pulled_entry; struct xpc_vars_part *pulled_entry;
u64 remote_entry_cacheline_pa, remote_entry_pa; u64 remote_entry_cacheline_pa, remote_entry_pa;
partid_t partid = XPC_PARTID(part); partid_t partid = XPC_PARTID(part);
enum xpc_retval ret; enum xpc_retval ret;
/* pull the cacheline that contains the variables we're interested in */ /* pull the cacheline that contains the variables we're interested in */
DBUG_ON(part->remote_vars_part_pa != DBUG_ON(part->remote_vars_part_pa !=
L1_CACHE_ALIGN(part->remote_vars_part_pa)); L1_CACHE_ALIGN(part->remote_vars_part_pa));
DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
remote_entry_pa = part->remote_vars_part_pa + remote_entry_pa = part->remote_vars_part_pa +
sn_partition_id * sizeof(struct xpc_vars_part); sn_partition_id * sizeof(struct xpc_vars_part);
remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
(remote_entry_pa & (L1_CACHE_BYTES - 1))); (remote_entry_pa &
(L1_CACHE_BYTES - 1)));
ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
(void *) remote_entry_cacheline_pa, (void *)remote_entry_cacheline_pa,
L1_CACHE_BYTES); L1_CACHE_BYTES);
if (ret != xpcSuccess) { if (ret != xpcSuccess) {
dev_dbg(xpc_chan, "failed to pull XPC vars_part from " dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
"partition %d, ret=%d\n", partid, ret); "partition %d, ret=%d\n", partid, ret);
return ret; return ret;
} }
/* see if they've been set up yet */ /* see if they've been set up yet */
if (pulled_entry->magic != XPC_VP_MAGIC1 && if (pulled_entry->magic != XPC_VP_MAGIC1 &&
pulled_entry->magic != XPC_VP_MAGIC2) { pulled_entry->magic != XPC_VP_MAGIC2) {
if (pulled_entry->magic != 0) { if (pulled_entry->magic != 0) {
dev_dbg(xpc_chan, "partition %d's XPC vars_part for " dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
...@@ -353,8 +338,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) ...@@ -353,8 +338,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
/* validate the variables */ /* validate the variables */
if (pulled_entry->GPs_pa == 0 || if (pulled_entry->GPs_pa == 0 ||
pulled_entry->openclose_args_pa == 0 || pulled_entry->openclose_args_pa == 0 ||
pulled_entry->IPI_amo_pa == 0) { pulled_entry->IPI_amo_pa == 0) {
dev_err(xpc_chan, "partition %d's XPC vars_part for " dev_err(xpc_chan, "partition %d's XPC vars_part for "
"partition %d are not valid\n", partid, "partition %d are not valid\n", partid,
...@@ -366,29 +351,26 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) ...@@ -366,29 +351,26 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
part->remote_GPs_pa = pulled_entry->GPs_pa; part->remote_GPs_pa = pulled_entry->GPs_pa;
part->remote_openclose_args_pa = part->remote_openclose_args_pa =
pulled_entry->openclose_args_pa; pulled_entry->openclose_args_pa;
part->remote_IPI_amo_va = part->remote_IPI_amo_va =
(AMO_t *) __va(pulled_entry->IPI_amo_pa); (AMO_t *)__va(pulled_entry->IPI_amo_pa);
part->remote_IPI_nasid = pulled_entry->IPI_nasid; part->remote_IPI_nasid = pulled_entry->IPI_nasid;
part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
if (part->nchannels > pulled_entry->nchannels) { if (part->nchannels > pulled_entry->nchannels)
part->nchannels = pulled_entry->nchannels; part->nchannels = pulled_entry->nchannels;
}
/* let the other side know that we've pulled their variables */ /* let the other side know that we've pulled their variables */
xpc_vars_part[partid].magic = XPC_VP_MAGIC2; xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
} }
if (pulled_entry->magic == XPC_VP_MAGIC1) { if (pulled_entry->magic == XPC_VP_MAGIC1)
return xpcRetry; return xpcRetry;
}
return xpcSuccess; return xpcSuccess;
} }
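
The pull above depends on an alignment trick: a stack buffer of twice the cacheline size is guaranteed to contain one fully aligned line, and the wanted entry sits at its original offset within that pulled line. A minimal stand-alone sketch of the same pattern, with hypothetical names and the BTE transfer replaced by a plain memcpy():

#include <stdint.h>
#include <string.h>

#define LINE_BYTES	128UL
#define LINE_ALIGN(a)	(((a) + LINE_BYTES - 1) & ~(LINE_BYTES - 1))

struct vars_part {		/* hypothetical per-partition entry */
	uint64_t magic;
	uint64_t GPs_pa;
};

/* remote_base must be line-aligned, as the real code asserts */
static void pull_entry(const void *remote_base, int slot,
		       struct vars_part *out)
{
	uint8_t buffer[LINE_BYTES * 2];
	uint8_t *line = (uint8_t *)LINE_ALIGN((uintptr_t)buffer);
	uintptr_t entry = (uintptr_t)remote_base +
			  slot * sizeof(struct vars_part);
	uintptr_t line_start = entry & ~(LINE_BYTES - 1);

	/* stand-in for the xpc_pull_remote_cachelines()/BTE copy */
	memcpy(line, (const void *)line_start, LINE_BYTES);

	/* the entry sits at its original offset within the line */
	*out = *(struct vars_part *)(line + (entry & (LINE_BYTES - 1)));
}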
/* /*
* Get the IPI flags and pull the openclose args and/or remote GPs as needed. * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
*/ */
...@@ -399,23 +381,23 @@ xpc_get_IPI_flags(struct xpc_partition *part) ...@@ -399,23 +381,23 @@ xpc_get_IPI_flags(struct xpc_partition *part)
u64 IPI_amo; u64 IPI_amo;
enum xpc_retval ret; enum xpc_retval ret;
/* /*
* See if there are any IPI flags to be handled. * See if there are any IPI flags to be handled.
*/ */
spin_lock_irqsave(&part->IPI_lock, irq_flags); spin_lock_irqsave(&part->IPI_lock, irq_flags);
if ((IPI_amo = part->local_IPI_amo) != 0) { IPI_amo = part->local_IPI_amo;
if (IPI_amo != 0)
part->local_IPI_amo = 0; part->local_IPI_amo = 0;
}
spin_unlock_irqrestore(&part->IPI_lock, irq_flags); spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
ret = xpc_pull_remote_cachelines(part, ret = xpc_pull_remote_cachelines(part,
part->remote_openclose_args, part->remote_openclose_args,
(void *) part->remote_openclose_args_pa, (void *)part->
XPC_OPENCLOSE_ARGS_SIZE); remote_openclose_args_pa,
XPC_OPENCLOSE_ARGS_SIZE);
if (ret != xpcSuccess) { if (ret != xpcSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
...@@ -430,8 +412,8 @@ xpc_get_IPI_flags(struct xpc_partition *part) ...@@ -430,8 +412,8 @@ xpc_get_IPI_flags(struct xpc_partition *part)
if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
ret = xpc_pull_remote_cachelines(part, part->remote_GPs, ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
(void *) part->remote_GPs_pa, (void *)part->remote_GPs_pa,
XPC_GP_SIZE); XPC_GP_SIZE);
if (ret != xpcSuccess) { if (ret != xpcSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
...@@ -446,7 +428,6 @@ xpc_get_IPI_flags(struct xpc_partition *part) ...@@ -446,7 +428,6 @@ xpc_get_IPI_flags(struct xpc_partition *part)
return IPI_amo; return IPI_amo;
} }
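
The lock-protected grab-and-clear of local_IPI_amo above ensures each raised flag word is consumed by exactly one pass. A reduced sketch of the pattern, assuming a pthread mutex in place of the IPI spinlock:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t ipi_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t local_ipi_amo;

static uint64_t grab_ipi_flags(void)
{
	uint64_t amo;

	/* take and zero the flags in one critical section so a
	 * concurrent setter either lands before this grab or is
	 * seen by the next one -- never lost */
	pthread_mutex_lock(&ipi_lock);
	amo = local_ipi_amo;
	if (amo != 0)
		local_ipi_amo = 0;
	pthread_mutex_unlock(&ipi_lock);

	return amo;
}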
/* /*
* Allocate the local message queue and the notify queue. * Allocate the local message queue and the notify queue.
*/ */
...@@ -457,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch) ...@@ -457,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
int nentries; int nentries;
size_t nbytes; size_t nbytes;
// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
// >>> iterations of the for-loop, bail if set?
// >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->local_nentries; nentries > 0; nentries--) { for (nentries = ch->local_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size; nbytes = nentries * ch->msg_size;
ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
GFP_KERNEL, GFP_KERNEL,
&ch->local_msgqueue_base); &ch->local_msgqueue_base);
if (ch->local_msgqueue == NULL) { if (ch->local_msgqueue == NULL)
continue; continue;
}
nbytes = nentries * sizeof(struct xpc_notify); nbytes = nentries * sizeof(struct xpc_notify);
ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
...@@ -497,7 +472,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch) ...@@ -497,7 +472,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
return xpcNoMemory; return xpcNoMemory;
} }
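
The loop above implements best-effort sizing: if the requested number of entries cannot be allocated, it retries with progressively fewer and records how many were actually granted. A user-space sketch under that assumption (calloc standing in for the cacheline-aligned kzalloc wrapper):

#include <stdlib.h>

static void *alloc_queue(int wanted, size_t entry_size, int *granted)
{
	int n;
	void *q;

	for (n = wanted; n > 0; n--) {
		q = calloc(n, entry_size);
		if (q != NULL) {
			*granted = n;	/* may be fewer than wanted */
			return q;
		}
	}
	*granted = 0;		/* complete failure */
	return NULL;
}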
/* /*
* Allocate the cached remote message queue. * Allocate the cached remote message queue.
*/ */
...@@ -508,22 +482,16 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch) ...@@ -508,22 +482,16 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
int nentries; int nentries;
size_t nbytes; size_t nbytes;
DBUG_ON(ch->remote_nentries <= 0); DBUG_ON(ch->remote_nentries <= 0);
// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
// >>> iterations of the for-loop, bail if set?
// >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->remote_nentries; nentries > 0; nentries--) { for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size; nbytes = nentries * ch->msg_size;
ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
GFP_KERNEL, GFP_KERNEL,
&ch->remote_msgqueue_base); &ch->remote_msgqueue_base);
if (ch->remote_msgqueue == NULL) { if (ch->remote_msgqueue == NULL)
continue; continue;
}
spin_lock_irqsave(&ch->lock, irq_flags); spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->remote_nentries) { if (nentries < ch->remote_nentries) {
...@@ -542,7 +510,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch) ...@@ -542,7 +510,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
return xpcNoMemory; return xpcNoMemory;
} }
/* /*
* Allocate message queues and other stuff associated with a channel. * Allocate message queues and other stuff associated with a channel.
* *
...@@ -554,14 +521,14 @@ xpc_allocate_msgqueues(struct xpc_channel *ch) ...@@ -554,14 +521,14 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
unsigned long irq_flags; unsigned long irq_flags;
enum xpc_retval ret; enum xpc_retval ret;
DBUG_ON(ch->flags & XPC_C_SETUP); DBUG_ON(ch->flags & XPC_C_SETUP);
if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { ret = xpc_allocate_local_msgqueue(ch);
if (ret != xpcSuccess)
return ret; return ret;
}
if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { ret = xpc_allocate_remote_msgqueue(ch);
if (ret != xpcSuccess) {
kfree(ch->local_msgqueue_base); kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL; ch->local_msgqueue = NULL;
kfree(ch->notify_queue); kfree(ch->notify_queue);
...@@ -576,7 +543,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch) ...@@ -576,7 +543,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
return xpcSuccess; return xpcSuccess;
} }
/* /*
* Process a connect message from a remote partition. * Process a connect message from a remote partition.
* *
...@@ -588,11 +554,10 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -588,11 +554,10 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{ {
enum xpc_retval ret; enum xpc_retval ret;
DBUG_ON(!spin_is_locked(&ch->lock)); DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_OPENREQUEST) || if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) { !(ch->flags & XPC_C_ROPENREQUEST)) {
/* nothing more to do for now */ /* nothing more to do for now */
return; return;
} }
...@@ -603,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -603,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
ret = xpc_allocate_msgqueues(ch); ret = xpc_allocate_msgqueues(ch);
spin_lock_irqsave(&ch->lock, *irq_flags); spin_lock_irqsave(&ch->lock, *irq_flags);
if (ret != xpcSuccess) { if (ret != xpcSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
}
if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
return; return;
}
DBUG_ON(!(ch->flags & XPC_C_SETUP)); DBUG_ON(!(ch->flags & XPC_C_SETUP));
DBUG_ON(ch->local_msgqueue == NULL); DBUG_ON(ch->local_msgqueue == NULL);
...@@ -620,23 +584,21 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -620,23 +584,21 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
xpc_IPI_send_openreply(ch, irq_flags); xpc_IPI_send_openreply(ch, irq_flags);
} }
if (!(ch->flags & XPC_C_ROPENREPLY)) { if (!(ch->flags & XPC_C_ROPENREPLY))
return; return;
}
DBUG_ON(ch->remote_msgqueue_pa == 0); DBUG_ON(ch->remote_msgqueue_pa == 0);
ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
dev_info(xpc_chan, "channel %d to partition %d connected\n", dev_info(xpc_chan, "channel %d to partition %d connected\n",
ch->number, ch->partid); ch->number, ch->partid);
spin_unlock_irqrestore(&ch->lock, *irq_flags); spin_unlock_irqrestore(&ch->lock, *irq_flags);
xpc_create_kthreads(ch, 1, 0); xpc_create_kthreads(ch, 1, 0);
spin_lock_irqsave(&ch->lock, *irq_flags); spin_lock_irqsave(&ch->lock, *irq_flags);
} }
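
Note the unlock around xpc_create_kthreads() at the end: the channel lock cannot be held across a call that may sleep, so it is dropped for the callout and retaken before returning, since the caller expects the lock still held on exit. The pattern in isolation, with pthread stand-ins:

#include <pthread.h>

static void connected_callout(pthread_mutex_t *lock,
			      void (*blocking_call)(void))
{
	/* caller holds *lock on entry and expects it held on exit */
	pthread_mutex_unlock(lock);
	blocking_call();	/* may sleep; must not hold the lock */
	pthread_mutex_lock(lock);
}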
/* /*
* Notify those who wanted to be notified upon delivery of their message. * Notify those who wanted to be notified upon delivery of their message.
*/ */
...@@ -647,7 +609,6 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) ...@@ -647,7 +609,6 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
u8 notify_type; u8 notify_type;
s64 get = ch->w_remote_GP.get - 1; s64 get = ch->w_remote_GP.get - 1;
while (++get < put && atomic_read(&ch->n_to_notify) > 0) { while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
notify = &ch->notify_queue[get % ch->local_nentries]; notify = &ch->notify_queue[get % ch->local_nentries];
...@@ -660,8 +621,7 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) ...@@ -660,8 +621,7 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
*/ */
notify_type = notify->type; notify_type = notify->type;
if (notify_type == 0 || if (notify_type == 0 ||
cmpxchg(&notify->type, notify_type, 0) != cmpxchg(&notify->type, notify_type, 0) != notify_type) {
notify_type) {
continue; continue;
} }
...@@ -672,20 +632,19 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) ...@@ -672,20 +632,19 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
if (notify->func != NULL) { if (notify->func != NULL) {
dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n", "msg_number=%ld, partid=%d, channel=%d\n",
(void *) notify, get, ch->partid, ch->number); (void *)notify, get, ch->partid, ch->number);
notify->func(reason, ch->partid, ch->number, notify->func(reason, ch->partid, ch->number,
notify->key); notify->key);
dev_dbg(xpc_chan, "notify->func() returned, " dev_dbg(xpc_chan, "notify->func() returned, "
"notify=0x%p, msg_number=%ld, partid=%d, " "notify=0x%p, msg_number=%ld, partid=%d, "
"channel=%d\n", (void *) notify, get, "channel=%d\n", (void *)notify, get,
ch->partid, ch->number); ch->partid, ch->number);
} }
} }
} }
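
The cmpxchg() on notify->type is what makes each callout single-shot: only the path that successfully swaps the type to zero may invoke the function, whether that is this sweep or a sender timing out concurrently. A compact sketch of the claim, using a GCC __sync builtin as a stand-in for the kernel's cmpxchg():

#include <stdint.h>

struct notify_entry {
	uint8_t type;			/* nonzero while armed */
	void (*func)(void *key);
	void *key;
};

static void claim_and_notify(struct notify_entry *notify)
{
	uint8_t type = notify->type;

	/* whoever swaps type -> 0 owns the callout */
	if (type == 0 ||
	    __sync_val_compare_and_swap(&notify->type, type, 0) != type)
		return;		/* unarmed, or someone else claimed it */

	if (notify->func != NULL)
		notify->func(notify->key);
}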
/* /*
* Free up message queues and other stuff that were allocated for the specified * Free up message queues and other stuff that were allocated for the specified
* channel. * channel.
...@@ -733,7 +692,6 @@ xpc_free_msgqueues(struct xpc_channel *ch) ...@@ -733,7 +692,6 @@ xpc_free_msgqueues(struct xpc_channel *ch)
} }
} }
/* /*
* spin_lock_irqsave() is expected to be held on entry. * spin_lock_irqsave() is expected to be held on entry.
*/ */
...@@ -743,46 +701,41 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -743,46 +701,41 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
DBUG_ON(!spin_is_locked(&ch->lock)); DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_DISCONNECTING)) { if (!(ch->flags & XPC_C_DISCONNECTING))
return; return;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
/* make sure all activity has settled down first */ /* make sure all activity has settled down first */
if (atomic_read(&ch->kthreads_assigned) > 0 || if (atomic_read(&ch->kthreads_assigned) > 0 ||
atomic_read(&ch->references) > 0) { atomic_read(&ch->references) > 0) {
return; return;
} }
DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
if (part->act_state == XPC_P_DEACTIVATING) { if (part->act_state == XPC_P_DEACTIVATING) {
/* can't proceed until the other side disengages from us */ /* can't proceed until the other side disengages from us */
if (xpc_partition_engaged(1UL << ch->partid)) { if (xpc_partition_engaged(1UL << ch->partid))
return; return;
}
} else { } else {
/* as long as the other side is up do the full protocol */ /* as long as the other side is up do the full protocol */
if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { if (!(ch->flags & XPC_C_RCLOSEREQUEST))
return; return;
}
if (!(ch->flags & XPC_C_CLOSEREPLY)) { if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY; ch->flags |= XPC_C_CLOSEREPLY;
xpc_IPI_send_closereply(ch, irq_flags); xpc_IPI_send_closereply(ch, irq_flags);
} }
if (!(ch->flags & XPC_C_RCLOSEREPLY)) { if (!(ch->flags & XPC_C_RCLOSEREPLY))
return; return;
}
} }
/* wake those waiting for notify completion */ /* wake those waiting for notify completion */
...@@ -809,7 +762,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -809,7 +762,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
if (channel_was_connected) { if (channel_was_connected) {
dev_info(xpc_chan, "channel %d to partition %d disconnected, " dev_info(xpc_chan, "channel %d to partition %d disconnected, "
"reason=%d\n", ch->number, ch->partid, ch->reason); "reason=%d\n", ch->number, ch->partid, ch->reason);
} }
if (ch->flags & XPC_C_WDISCONNECT) { if (ch->flags & XPC_C_WDISCONNECT) {
...@@ -820,35 +773,32 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) ...@@ -820,35 +773,32 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
/* time to take action on any delayed IPI flags */ /* time to take action on any delayed IPI flags */
spin_lock(&part->IPI_lock); spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number, XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
ch->delayed_IPI_flags); ch->delayed_IPI_flags);
spin_unlock(&part->IPI_lock); spin_unlock(&part->IPI_lock);
} }
ch->delayed_IPI_flags = 0; ch->delayed_IPI_flags = 0;
} }
} }
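
The close path above is a two-flag handshake: when the peer is still up, teardown may proceed only after its CLOSEREQUEST has been seen, our CLOSEREPLY sent, and its CLOSEREPLY received. The core of that state machine, condensed into a sketch whose flag names mirror the XPC_C_* bits:

#include <stdbool.h>

#define C_RCLOSEREQUEST	0x1	/* peer asked to close */
#define C_CLOSEREPLY	0x2	/* we acknowledged the peer */
#define C_RCLOSEREPLY	0x4	/* peer acknowledged us */

static bool close_handshake_done(unsigned int *flags,
				 void (*send_closereply)(void))
{
	if (!(*flags & C_RCLOSEREQUEST))
		return false;	/* peer not closing yet */

	if (!(*flags & C_CLOSEREPLY)) {
		*flags |= C_CLOSEREPLY;
		send_closereply();	/* acknowledge exactly once */
	}

	return (*flags & C_RCLOSEREPLY) != 0;
}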
/* /*
* Process a change in the channel's remote connection state. * Process a change in the channel's remote connection state.
*/ */
static void static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
u8 IPI_flags) u8 IPI_flags)
{ {
unsigned long irq_flags; unsigned long irq_flags;
struct xpc_openclose_args *args = struct xpc_openclose_args *args =
&part->remote_openclose_args[ch_number]; &part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number]; struct xpc_channel *ch = &part->channels[ch_number];
enum xpc_retval reason; enum xpc_retval reason;
spin_lock_irqsave(&ch->lock, irq_flags); spin_lock_irqsave(&ch->lock, irq_flags);
again: again:
if ((ch->flags & XPC_C_DISCONNECTED) && if ((ch->flags & XPC_C_DISCONNECTED) &&
(ch->flags & XPC_C_WDISCONNECT)) { (ch->flags & XPC_C_WDISCONNECT)) {
/* /*
* Delay processing IPI flags until thread waiting disconnect * Delay processing IPI flags until thread waiting disconnect
* has had a chance to see that the channel is disconnected. * has had a chance to see that the channel is disconnected.
...@@ -858,7 +808,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -858,7 +808,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
return; return;
} }
if (IPI_flags & XPC_IPI_CLOSEREQUEST) { if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
...@@ -890,13 +839,14 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -890,13 +839,14 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
if (ch->flags & XPC_C_DISCONNECTED) { if (ch->flags & XPC_C_DISCONNECTED) {
if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
ch_number) & XPC_IPI_OPENREQUEST)) { ch_number) &
XPC_IPI_OPENREQUEST)) {
DBUG_ON(ch->delayed_IPI_flags != 0); DBUG_ON(ch->delayed_IPI_flags != 0);
spin_lock(&part->IPI_lock); spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo, XPC_SET_IPI_FLAGS(part->local_IPI_amo,
ch_number, ch_number,
XPC_IPI_CLOSEREQUEST); XPC_IPI_CLOSEREQUEST);
spin_unlock(&part->IPI_lock); spin_unlock(&part->IPI_lock);
} }
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
...@@ -921,11 +871,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -921,11 +871,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
if (!(ch->flags & XPC_C_DISCONNECTING)) { if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason; reason = args->reason;
if (reason <= xpcSuccess || reason > xpcUnknownReason) { if (reason <= xpcSuccess || reason > xpcUnknownReason)
reason = xpcUnknownReason; reason = xpcUnknownReason;
} else if (reason == xpcUnregistering) { else if (reason == xpcUnregistering)
reason = xpcOtherUnregistering; reason = xpcOtherUnregistering;
}
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
...@@ -937,7 +886,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -937,7 +886,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
xpc_process_disconnect(ch, &irq_flags); xpc_process_disconnect(ch, &irq_flags);
} }
if (IPI_flags & XPC_IPI_CLOSEREPLY) { if (IPI_flags & XPC_IPI_CLOSEREPLY) {
dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
...@@ -953,12 +901,13 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -953,12 +901,13 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number) if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
& XPC_IPI_CLOSEREQUEST)) { & XPC_IPI_CLOSEREQUEST)) {
DBUG_ON(ch->delayed_IPI_flags != 0); DBUG_ON(ch->delayed_IPI_flags != 0);
spin_lock(&part->IPI_lock); spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo, XPC_SET_IPI_FLAGS(part->local_IPI_amo,
ch_number, XPC_IPI_CLOSEREPLY); ch_number,
XPC_IPI_CLOSEREPLY);
spin_unlock(&part->IPI_lock); spin_unlock(&part->IPI_lock);
} }
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
...@@ -973,7 +922,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -973,7 +922,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
} }
} }
if (IPI_flags & XPC_IPI_OPENREQUEST) { if (IPI_flags & XPC_IPI_OPENREQUEST) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
...@@ -982,7 +930,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -982,7 +930,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
ch->partid, ch->number); ch->partid, ch->number);
if (part->act_state == XPC_P_DEACTIVATING || if (part->act_state == XPC_P_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) { (ch->flags & XPC_C_ROPENREQUEST)) {
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
return; return;
} }
...@@ -993,9 +941,9 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -993,9 +941,9 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
return; return;
} }
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
XPC_C_OPENREQUEST))); XPC_C_OPENREQUEST)));
DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_OPENREPLY | XPC_C_CONNECTED)); XPC_C_OPENREPLY | XPC_C_CONNECTED));
/* /*
* The meaningful OPENREQUEST connection state fields are: * The meaningful OPENREQUEST connection state fields are:
...@@ -1011,11 +959,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -1011,11 +959,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
ch->remote_nentries = args->local_nentries; ch->remote_nentries = args->local_nentries;
if (ch->flags & XPC_C_OPENREQUEST) { if (ch->flags & XPC_C_OPENREQUEST) {
if (args->msg_size != ch->msg_size) { if (args->msg_size != ch->msg_size) {
XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
&irq_flags); &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
return; return;
} }
...@@ -1031,7 +978,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -1031,7 +978,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
xpc_process_connect(ch, &irq_flags); xpc_process_connect(ch, &irq_flags);
} }
if (IPI_flags & XPC_IPI_OPENREPLY) { if (IPI_flags & XPC_IPI_OPENREPLY) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
...@@ -1046,7 +992,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -1046,7 +992,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
} }
if (!(ch->flags & XPC_C_OPENREQUEST)) { if (!(ch->flags & XPC_C_OPENREQUEST)) {
XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError, XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
&irq_flags); &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
return; return;
} }
...@@ -1057,7 +1003,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -1057,7 +1003,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
/* /*
* The meaningful OPENREPLY connection state fields are: * The meaningful OPENREPLY connection state fields are:
* local_msgqueue_pa = physical address of remote * local_msgqueue_pa = physical address of remote
* partition's local_msgqueue * partition's local_msgqueue
* local_nentries = remote partition's local_nentries * local_nentries = remote partition's local_nentries
* remote_nentries = remote partition's remote_nentries * remote_nentries = remote partition's remote_nentries
*/ */
...@@ -1093,7 +1039,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, ...@@ -1093,7 +1039,6 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
} }
/* /*
* Attempt to establish a channel connection to a remote partition. * Attempt to establish a channel connection to a remote partition.
*/ */
...@@ -1103,10 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch) ...@@ -1103,10 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch)
unsigned long irq_flags; unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number]; struct xpc_registration *registration = &xpc_registrations[ch->number];
if (mutex_trylock(&registration->mutex) == 0) { if (mutex_trylock(&registration->mutex) == 0)
return xpcRetry; return xpcRetry;
}
if (!XPC_CHANNEL_REGISTERED(ch->number)) { if (!XPC_CHANNEL_REGISTERED(ch->number)) {
mutex_unlock(&registration->mutex); mutex_unlock(&registration->mutex);
...@@ -1124,7 +1067,6 @@ xpc_connect_channel(struct xpc_channel *ch) ...@@ -1124,7 +1067,6 @@ xpc_connect_channel(struct xpc_channel *ch)
return ch->reason; return ch->reason;
} }
/* add info from the channel connect registration to the channel */ /* add info from the channel connect registration to the channel */
ch->kthreads_assigned_limit = registration->assigned_limit; ch->kthreads_assigned_limit = registration->assigned_limit;
...@@ -1154,7 +1096,7 @@ xpc_connect_channel(struct xpc_channel *ch) ...@@ -1154,7 +1096,7 @@ xpc_connect_channel(struct xpc_channel *ch)
*/ */
mutex_unlock(&registration->mutex); mutex_unlock(&registration->mutex);
XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
&irq_flags); &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpcUnequalMsgSizes; return xpcUnequalMsgSizes;
} }
...@@ -1169,7 +1111,6 @@ xpc_connect_channel(struct xpc_channel *ch) ...@@ -1169,7 +1111,6 @@ xpc_connect_channel(struct xpc_channel *ch)
mutex_unlock(&registration->mutex); mutex_unlock(&registration->mutex);
/* initiate the connection */ /* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
...@@ -1182,7 +1123,6 @@ xpc_connect_channel(struct xpc_channel *ch) ...@@ -1182,7 +1123,6 @@ xpc_connect_channel(struct xpc_channel *ch)
return xpcSuccess; return xpcSuccess;
} }
/* /*
* Clear some of the msg flags in the local message queue. * Clear some of the msg flags in the local message queue.
*/ */
...@@ -1192,16 +1132,15 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch) ...@@ -1192,16 +1132,15 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
struct xpc_msg *msg; struct xpc_msg *msg;
s64 get; s64 get;
get = ch->w_remote_GP.get; get = ch->w_remote_GP.get;
do { do {
msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
(get % ch->local_nentries) * ch->msg_size); (get % ch->local_nentries) *
ch->msg_size);
msg->flags = 0; msg->flags = 0;
} while (++get < (volatile s64) ch->remote_GP.get); } while (++get < ch->remote_GP.get);
} }
/* /*
* Clear some of the msg flags in the remote message queue. * Clear some of the msg flags in the remote message queue.
*/ */
...@@ -1211,43 +1150,39 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) ...@@ -1211,43 +1150,39 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
struct xpc_msg *msg; struct xpc_msg *msg;
s64 put; s64 put;
put = ch->w_remote_GP.put; put = ch->w_remote_GP.put;
do { do {
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
(put % ch->remote_nentries) * ch->msg_size); (put % ch->remote_nentries) *
ch->msg_size);
msg->flags = 0; msg->flags = 0;
} while (++put < (volatile s64) ch->remote_GP.put); } while (++put < ch->remote_GP.put);
} }
static void static void
xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
{ {
struct xpc_channel *ch = &part->channels[ch_number]; struct xpc_channel *ch = &part->channels[ch_number];
int nmsgs_sent; int nmsgs_sent;
ch->remote_GP = part->remote_GPs[ch_number]; ch->remote_GP = part->remote_GPs[ch_number];
/* See what, if anything, has changed for each connected channel */ /* See what, if anything, has changed for each connected channel */
xpc_msgqueue_ref(ch); xpc_msgqueue_ref(ch);
if (ch->w_remote_GP.get == ch->remote_GP.get && if (ch->w_remote_GP.get == ch->remote_GP.get &&
ch->w_remote_GP.put == ch->remote_GP.put) { ch->w_remote_GP.put == ch->remote_GP.put) {
/* nothing changed since GPs were last pulled */ /* nothing changed since GPs were last pulled */
xpc_msgqueue_deref(ch); xpc_msgqueue_deref(ch);
return; return;
} }
if (!(ch->flags & XPC_C_CONNECTED)){ if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch); xpc_msgqueue_deref(ch);
return; return;
} }
/* /*
* First check to see if messages recently sent by us have been * First check to see if messages recently sent by us have been
* received by the other side. (The remote GET value will have * received by the other side. (The remote GET value will have
...@@ -1269,7 +1204,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) ...@@ -1269,7 +1204,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
* received and delivered by the other side. * received and delivered by the other side.
*/ */
xpc_notify_senders(ch, xpcMsgDelivered, xpc_notify_senders(ch, xpcMsgDelivered,
ch->remote_GP.get); ch->remote_GP.get);
} }
/* /*
...@@ -1288,12 +1223,10 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) ...@@ -1288,12 +1223,10 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
* If anyone was waiting for message queue entries to become * If anyone was waiting for message queue entries to become
* available, wake them up. * available, wake them up.
*/ */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq); wake_up(&ch->msg_allocate_wq);
}
} }
/* /*
* Now check for newly sent messages by the other side. (The remote * Now check for newly sent messages by the other side. (The remote
* PUT value will have changed since we last looked at it.) * PUT value will have changed since we last looked at it.)
...@@ -1318,16 +1251,14 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) ...@@ -1318,16 +1251,14 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
"delivered=%d, partid=%d, channel=%d\n", "delivered=%d, partid=%d, channel=%d\n",
nmsgs_sent, ch->partid, ch->number); nmsgs_sent, ch->partid, ch->number);
if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
xpc_activate_kthreads(ch, nmsgs_sent); xpc_activate_kthreads(ch, nmsgs_sent);
}
} }
} }
xpc_msgqueue_deref(ch); xpc_msgqueue_deref(ch);
} }
void void
xpc_process_channel_activity(struct xpc_partition *part) xpc_process_channel_activity(struct xpc_partition *part)
{ {
...@@ -1337,7 +1268,6 @@ xpc_process_channel_activity(struct xpc_partition *part) ...@@ -1337,7 +1268,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
int ch_number; int ch_number;
u32 ch_flags; u32 ch_flags;
IPI_amo = xpc_get_IPI_flags(part); IPI_amo = xpc_get_IPI_flags(part);
/* /*
...@@ -1350,7 +1280,6 @@ xpc_process_channel_activity(struct xpc_partition *part) ...@@ -1350,7 +1280,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
for (ch_number = 0; ch_number < part->nchannels; ch_number++) { for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number]; ch = &part->channels[ch_number];
/* /*
* Process any open or close related IPI flags, and then deal * Process any open or close related IPI flags, and then deal
* with connecting or disconnecting the channel as required. * with connecting or disconnecting the channel as required.
...@@ -1358,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part) ...@@ -1358,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
xpc_process_openclose_IPI(part, ch_number, IPI_flags); xpc_process_openclose_IPI(part, ch_number, IPI_flags);
}
ch_flags = ch->flags; /* need an atomic snapshot of flags */ ch_flags = ch->flags; /* need an atomic snapshot of flags */
...@@ -1371,14 +1299,13 @@ xpc_process_channel_activity(struct xpc_partition *part) ...@@ -1371,14 +1299,13 @@ xpc_process_channel_activity(struct xpc_partition *part)
continue; continue;
} }
if (part->act_state == XPC_P_DEACTIVATING) { if (part->act_state == XPC_P_DEACTIVATING)
continue; continue;
}
if (!(ch_flags & XPC_C_CONNECTED)) { if (!(ch_flags & XPC_C_CONNECTED)) {
if (!(ch_flags & XPC_C_OPENREQUEST)) { if (!(ch_flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch_flags & XPC_C_SETUP); DBUG_ON(ch_flags & XPC_C_SETUP);
(void) xpc_connect_channel(ch); (void)xpc_connect_channel(ch);
} else { } else {
spin_lock_irqsave(&ch->lock, irq_flags); spin_lock_irqsave(&ch->lock, irq_flags);
xpc_process_connect(ch, &irq_flags); xpc_process_connect(ch, &irq_flags);
...@@ -1387,20 +1314,17 @@ xpc_process_channel_activity(struct xpc_partition *part) ...@@ -1387,20 +1314,17 @@ xpc_process_channel_activity(struct xpc_partition *part)
continue; continue;
} }
/* /*
* Process any message related IPI flags, this may involve the * Process any message related IPI flags, this may involve the
* activation of kthreads to deliver any pending messages sent * activation of kthreads to deliver any pending messages sent
* from the other partition. * from the other partition.
*/ */
if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
xpc_process_msg_IPI(part, ch_number); xpc_process_msg_IPI(part, ch_number);
}
} }
} }
/* /*
* XPC's heartbeat code calls this function to inform XPC that a partition is * XPC's heartbeat code calls this function to inform XPC that a partition is
* going down. XPC responds by tearing down the XPartition Communication * going down. XPC responds by tearing down the XPartition Communication
...@@ -1417,7 +1341,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason) ...@@ -1417,7 +1341,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
int ch_number; int ch_number;
struct xpc_channel *ch; struct xpc_channel *ch;
dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
XPC_PARTID(part), reason); XPC_PARTID(part), reason);
...@@ -1426,7 +1349,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason) ...@@ -1426,7 +1349,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
return; return;
} }
/* disconnect channels associated with the partition going down */ /* disconnect channels associated with the partition going down */
for (ch_number = 0; ch_number < part->nchannels; ch_number++) { for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
...@@ -1446,7 +1368,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason) ...@@ -1446,7 +1368,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
xpc_part_deref(part); xpc_part_deref(part);
} }
/* /*
* Teardown the infrastructure necessary to support XPartition Communication * Teardown the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one. * between the specified remote partition and the local one.
...@@ -1456,7 +1377,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part) ...@@ -1456,7 +1377,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
{ {
partid_t partid = XPC_PARTID(part); partid_t partid = XPC_PARTID(part);
/* /*
* We start off by making this partition inaccessible to local * We start off by making this partition inaccessible to local
* processes by marking it as no longer setup. Then we make it * processes by marking it as no longer setup. Then we make it
...@@ -1473,9 +1393,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part) ...@@ -1473,9 +1393,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
xpc_vars_part[partid].magic = 0; xpc_vars_part[partid].magic = 0;
free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid); free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
/* /*
* Before proceeding with the teardown we have to wait until all * Before proceeding with the teardown we have to wait until all
...@@ -1483,7 +1401,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part) ...@@ -1483,7 +1401,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
*/ */
wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
/* now we can begin tearing down the infrastructure */ /* now we can begin tearing down the infrastructure */
part->setup_state = XPC_P_TORNDOWN; part->setup_state = XPC_P_TORNDOWN;
...@@ -1504,7 +1421,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part) ...@@ -1504,7 +1421,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
part->local_IPI_amo_va = NULL; part->local_IPI_amo_va = NULL;
} }
/* /*
* Called by XP at the time of channel connection registration to cause * Called by XP at the time of channel connection registration to cause
* XPC to establish connections to all currently active partitions. * XPC to establish connections to all currently active partitions.
...@@ -1516,7 +1432,6 @@ xpc_initiate_connect(int ch_number) ...@@ -1516,7 +1432,6 @@ xpc_initiate_connect(int ch_number)
struct xpc_partition *part; struct xpc_partition *part;
struct xpc_channel *ch; struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
...@@ -1535,7 +1450,6 @@ xpc_initiate_connect(int ch_number) ...@@ -1535,7 +1450,6 @@ xpc_initiate_connect(int ch_number)
} }
} }
void void
xpc_connected_callout(struct xpc_channel *ch) xpc_connected_callout(struct xpc_channel *ch)
{ {
...@@ -1546,14 +1460,13 @@ xpc_connected_callout(struct xpc_channel *ch) ...@@ -1546,14 +1460,13 @@ xpc_connected_callout(struct xpc_channel *ch)
"partid=%d, channel=%d\n", ch->partid, ch->number); "partid=%d, channel=%d\n", ch->partid, ch->number);
ch->func(xpcConnected, ch->partid, ch->number, ch->func(xpcConnected, ch->partid, ch->number,
(void *) (u64) ch->local_nentries, ch->key); (void *)(u64)ch->local_nentries, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number); "partid=%d, channel=%d\n", ch->partid, ch->number);
} }
} }
/* /*
* Called by XP at the time of channel connection unregistration to cause * Called by XP at the time of channel connection unregistration to cause
* XPC to teardown all current connections for the specified channel. * XPC to teardown all current connections for the specified channel.
...@@ -1575,7 +1488,6 @@ xpc_initiate_disconnect(int ch_number) ...@@ -1575,7 +1488,6 @@ xpc_initiate_disconnect(int ch_number)
struct xpc_partition *part; struct xpc_partition *part;
struct xpc_channel *ch; struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
/* initiate the channel disconnect for every active partition */ /* initiate the channel disconnect for every active partition */
...@@ -1592,7 +1504,7 @@ xpc_initiate_disconnect(int ch_number) ...@@ -1592,7 +1504,7 @@ xpc_initiate_disconnect(int ch_number)
ch->flags |= XPC_C_WDISCONNECT; ch->flags |= XPC_C_WDISCONNECT;
XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
&irq_flags); &irq_flags);
} }
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
...@@ -1605,7 +1517,6 @@ xpc_initiate_disconnect(int ch_number) ...@@ -1605,7 +1517,6 @@ xpc_initiate_disconnect(int ch_number)
xpc_disconnect_wait(ch_number); xpc_disconnect_wait(ch_number);
} }
/* /*
* To disconnect a channel, and reflect it back to all who may be waiting. * To disconnect a channel, and reflect it back to all who may be waiting.
* *
...@@ -1617,16 +1528,15 @@ xpc_initiate_disconnect(int ch_number) ...@@ -1617,16 +1528,15 @@ xpc_initiate_disconnect(int ch_number)
*/ */
void void
xpc_disconnect_channel(const int line, struct xpc_channel *ch, xpc_disconnect_channel(const int line, struct xpc_channel *ch,
enum xpc_retval reason, unsigned long *irq_flags) enum xpc_retval reason, unsigned long *irq_flags)
{ {
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
DBUG_ON(!spin_is_locked(&ch->lock)); DBUG_ON(!spin_is_locked(&ch->lock));
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return; return;
}
DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
...@@ -1637,14 +1547,13 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, ...@@ -1637,14 +1547,13 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
/* some of these may not have been set */ /* some of these may not have been set */
ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_CONNECTING | XPC_C_CONNECTED); XPC_C_CONNECTING | XPC_C_CONNECTED);
xpc_IPI_send_closerequest(ch, irq_flags); xpc_IPI_send_closerequest(ch, irq_flags);
if (channel_was_connected) { if (channel_was_connected)
ch->flags |= XPC_C_WASCONNECTED; ch->flags |= XPC_C_WASCONNECTED;
}
spin_unlock_irqrestore(&ch->lock, *irq_flags); spin_unlock_irqrestore(&ch->lock, *irq_flags);
...@@ -1653,20 +1562,18 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, ...@@ -1653,20 +1562,18 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
wake_up_all(&ch->idle_wq); wake_up_all(&ch->idle_wq);
} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
/* start a kthread that will do the xpcDisconnecting callout */ /* start a kthread that will do the xpcDisconnecting callout */
xpc_create_kthreads(ch, 1, 1); xpc_create_kthreads(ch, 1, 1);
} }
/* wake those waiting to allocate an entry from the local msg queue */ /* wake those waiting to allocate an entry from the local msg queue */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq); wake_up(&ch->msg_allocate_wq);
}
spin_lock_irqsave(&ch->lock, *irq_flags); spin_lock_irqsave(&ch->lock, *irq_flags);
} }
void void
xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
{ {
...@@ -1687,7 +1594,6 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) ...@@ -1687,7 +1594,6 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
} }
} }
/* /*
* Wait for a message entry to become available for the specified channel, * Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy. * but don't wait any longer than 1 jiffy.
...@@ -1697,9 +1603,8 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) ...@@ -1697,9 +1603,8 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
{ {
enum xpc_retval ret; enum xpc_retval ret;
if (ch->flags & XPC_C_DISCONNECTING) { if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? DBUG_ON(ch->reason == xpcInterrupted);
return ch->reason; return ch->reason;
} }
...@@ -1709,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) ...@@ -1709,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
if (ch->flags & XPC_C_DISCONNECTING) { if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason; ret = ch->reason;
DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? DBUG_ON(ch->reason == xpcInterrupted);
} else if (ret == 0) { } else if (ret == 0) {
ret = xpcTimeout; ret = xpcTimeout;
} else { } else {
...@@ -1719,20 +1624,18 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) ...@@ -1719,20 +1624,18 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
return ret; return ret;
} }
/* /*
* Allocate an entry for a message from the message queue associated with the * Allocate an entry for a message from the message queue associated with the
* specified channel. * specified channel.
*/ */
static enum xpc_retval static enum xpc_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags, xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
struct xpc_msg **address_of_msg) struct xpc_msg **address_of_msg)
{ {
struct xpc_msg *msg; struct xpc_msg *msg;
enum xpc_retval ret; enum xpc_retval ret;
s64 put; s64 put;
/* this reference will be dropped in xpc_send_msg() */ /* this reference will be dropped in xpc_send_msg() */
xpc_msgqueue_ref(ch); xpc_msgqueue_ref(ch);
...@@ -1745,7 +1648,6 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, ...@@ -1745,7 +1648,6 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
return xpcNotConnected; return xpcNotConnected;
} }
/* /*
* Get the next available message entry from the local message queue. * Get the next available message entry from the local message queue.
* If none are available, we'll make sure that we grab the latest * If none are available, we'll make sure that we grab the latest
...@@ -1755,25 +1657,23 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, ...@@ -1755,25 +1657,23 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
while (1) { while (1) {
put = (volatile s64) ch->w_local_GP.put; put = ch->w_local_GP.put;
if (put - (volatile s64) ch->w_remote_GP.get < rmb(); /* guarantee that .put loads before .get */
ch->local_nentries) { if (put - ch->w_remote_GP.get < ch->local_nentries) {
/* There are available message entries. We need to try /* There are available message entries. We need to try
* to secure one for ourselves. We'll do this by trying * to secure one for ourselves. We'll do this by trying
* to increment w_local_GP.put as long as someone else * to increment w_local_GP.put as long as someone else
* doesn't beat us to it. If they do, we'll have to * doesn't beat us to it. If they do, we'll have to
* try again. * try again.
*/ */
if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
put) {
/* we got the entry referenced by put */ /* we got the entry referenced by put */
break; break;
} }
continue; /* try again */ continue; /* try again */
} }
/* /*
* There aren't any available msg entries at this time. * There aren't any available msg entries at this time.
* *
...@@ -1783,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, ...@@ -1783,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
* that will cause the IPI handler to fetch the latest * that will cause the IPI handler to fetch the latest
* GP values as if an IPI was sent by the other side. * GP values as if an IPI was sent by the other side.
*/ */
if (ret == xpcTimeout) { if (ret == xpcTimeout)
xpc_IPI_send_local_msgrequest(ch); xpc_IPI_send_local_msgrequest(ch);
}
if (flags & XPC_NOWAIT) { if (flags & XPC_NOWAIT) {
xpc_msgqueue_deref(ch); xpc_msgqueue_deref(ch);
...@@ -1799,25 +1698,22 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, ...@@ -1799,25 +1698,22 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
} }
} }
/* get the message's address and initialize it */ /* get the message's address and initialize it */
msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
(put % ch->local_nentries) * ch->msg_size); (put % ch->local_nentries) * ch->msg_size);
DBUG_ON(msg->flags != 0); DBUG_ON(msg->flags != 0);
msg->number = put; msg->number = put;
dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n", put + 1, "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
(void *) msg, msg->number, ch->partid, ch->number); (void *)msg, msg->number, ch->partid, ch->number);
*address_of_msg = msg; *address_of_msg = msg;
return xpcSuccess; return xpcSuccess;
} }
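
The reservation loop above is lock-free: load the cached Put, check ring occupancy against Get (the rmb() added here keeps those two loads ordered), and claim a slot by advancing Put with cmpxchg(), retrying if another sender wins. A self-contained sketch using GCC __sync builtins in place of the kernel primitives:

#include <stdint.h>
#include <stdbool.h>

static bool reserve_slot(volatile int64_t *put_p, const int64_t *get_p,
			 int64_t nentries, int64_t *slot)
{
	int64_t put;

	for (;;) {
		put = *put_p;
		/* full barrier; the kernel path needs only rmb() to
		 * guarantee that .put loads before .get */
		__sync_synchronize();
		if (put - *get_p >= nentries)
			return false;	/* ring currently full */

		if (__sync_bool_compare_and_swap(put_p, put, put + 1)) {
			*slot = put % nentries;	/* we own this entry */
			return true;
		}
		/* lost the race to another sender; reload and retry */
	}
}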
/* /*
* Allocate an entry for a message from the message queue associated with the * Allocate an entry for a message from the message queue associated with the
* specified channel. NOTE that this routine can sleep waiting for a message * specified channel. NOTE that this routine can sleep waiting for a message
...@@ -1838,7 +1734,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) ...@@ -1838,7 +1734,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
enum xpc_retval ret = xpcUnknownReason; enum xpc_retval ret = xpcUnknownReason;
struct xpc_msg *msg = NULL; struct xpc_msg *msg = NULL;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
...@@ -1848,15 +1743,13 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) ...@@ -1848,15 +1743,13 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
xpc_part_deref(part); xpc_part_deref(part);
if (msg != NULL) { if (msg != NULL)
*payload = &msg->payload; *payload = &msg->payload;
}
} }
return ret; return ret;
} }
/* /*
* Now we actually send the messages that are ready to be sent by advancing * Now we actually send the messages that are ready to be sent by advancing
* the local message queue's Put value and then send an IPI to the recipient * the local message queue's Put value and then send an IPI to the recipient
...@@ -1869,20 +1762,18 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) ...@@ -1869,20 +1762,18 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
s64 put = initial_put + 1; s64 put = initial_put + 1;
int send_IPI = 0; int send_IPI = 0;
while (1) { while (1) {
while (1) { while (1) {
if (put == (volatile s64) ch->w_local_GP.put) { if (put == ch->w_local_GP.put)
break; break;
}
msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
(put % ch->local_nentries) * ch->msg_size); (put % ch->local_nentries) *
ch->msg_size);
if (!(msg->flags & XPC_M_READY)) { if (!(msg->flags & XPC_M_READY))
break; break;
}
put++; put++;
} }
...@@ -1893,9 +1784,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) ...@@ -1893,9 +1784,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
} }
if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
initial_put) { initial_put) {
/* someone else beat us to it */ /* someone else beat us to it */
DBUG_ON((volatile s64) ch->local_GP->put < initial_put); DBUG_ON(ch->local_GP->put < initial_put);
break; break;
} }
...@@ -1914,12 +1805,10 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) ...@@ -1914,12 +1805,10 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
initial_put = put; initial_put = put;
} }
if (send_IPI) { if (send_IPI)
xpc_IPI_send_msgrequest(ch); xpc_IPI_send_msgrequest(ch);
}
} }
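
xpc_send_msgs() batches its publication: it scans forward over contiguous READY entries and advances the shared Put once, so the recipient observes a whole run of messages per update. A reduced sketch of that step, with a flags array standing in for the per-message flag words:

#include <stdint.h>

#define M_READY	0x1

static int publish_ready(volatile int64_t *shared_put,
			 const uint8_t *flags, int64_t nentries,
			 int64_t initial_put, int64_t reserved_put)
{
	int64_t put = initial_put;

	/* advance past every contiguous READY entry */
	while (put < reserved_put && (flags[put % nentries] & M_READY))
		put++;

	if (put == initial_put)
		return 0;	/* first unsent entry not ready yet */

	/* publish the whole run at once; 0 if another sender won */
	return __sync_bool_compare_and_swap(shared_put, initial_put, put);
}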
/* /*
* Common code that does the actual sending of the message by advancing the * Common code that does the actual sending of the message by advancing the
* local message queue's Put value and sends an IPI to the partition the * local message queue's Put value and sends an IPI to the partition the
...@@ -1927,16 +1816,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) ...@@ -1927,16 +1816,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
*/ */
static enum xpc_retval static enum xpc_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
xpc_notify_func func, void *key) xpc_notify_func func, void *key)
{ {
enum xpc_retval ret = xpcSuccess; enum xpc_retval ret = xpcSuccess;
struct xpc_notify *notify = notify; struct xpc_notify *notify = notify;
s64 put, msg_number = msg->number; s64 put, msg_number = msg->number;
DBUG_ON(notify_type == XPC_N_CALL && func == NULL); DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
msg_number % ch->local_nentries); msg_number % ch->local_nentries);
DBUG_ON(msg->flags & XPC_M_READY); DBUG_ON(msg->flags & XPC_M_READY);
if (ch->flags & XPC_C_DISCONNECTING) { if (ch->flags & XPC_C_DISCONNECTING) {
...@@ -1959,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, ...@@ -1959,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
notify->key = key; notify->key = key;
notify->type = notify_type; notify->type = notify_type;
// >>> is a mb() needed here? /* >>> is a mb() needed here? */
if (ch->flags & XPC_C_DISCONNECTING) { if (ch->flags & XPC_C_DISCONNECTING) {
/* /*
...@@ -1970,7 +1858,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, ...@@ -1970,7 +1858,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
* the notify entry. * the notify entry.
*/ */
if (cmpxchg(&notify->type, notify_type, 0) == if (cmpxchg(&notify->type, notify_type, 0) ==
notify_type) { notify_type) {
atomic_dec(&ch->n_to_notify); atomic_dec(&ch->n_to_notify);
ret = ch->reason; ret = ch->reason;
} }
...@@ -1992,16 +1880,14 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, ...@@ -1992,16 +1880,14 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
/* see if the message is next in line to be sent, if so send it */ /* see if the message is next in line to be sent, if so send it */
put = ch->local_GP->put; put = ch->local_GP->put;
if (put == msg_number) { if (put == msg_number)
xpc_send_msgs(ch, put); xpc_send_msgs(ch, put);
}
/* drop the reference grabbed in xpc_allocate_msg() */ /* drop the reference grabbed in xpc_allocate_msg() */
xpc_msgqueue_deref(ch); xpc_msgqueue_deref(ch);
return ret; return ret;
} }
/* /*
* Send a message previously allocated using xpc_initiate_allocate() on the * Send a message previously allocated using xpc_initiate_allocate() on the
* specified channel connected to the specified partition. * specified channel connected to the specified partition.
...@@ -2029,8 +1915,7 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload) ...@@ -2029,8 +1915,7 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
enum xpc_retval ret; enum xpc_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
partid, ch_number); partid, ch_number);
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
...@@ -2042,7 +1927,6 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload) ...@@ -2042,7 +1927,6 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
return ret; return ret;
} }
/* /*
* Send a message previously allocated using xpc_initiate_allocate on the * Send a message previously allocated using xpc_initiate_allocate on the
* specified channel connected to the specified partition. * specified channel connected to the specified partition.
...@@ -2075,14 +1959,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload) ...@@ -2075,14 +1959,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
*/ */
enum xpc_retval enum xpc_retval
xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
xpc_notify_func func, void *key) xpc_notify_func func, void *key)
{ {
struct xpc_partition *part = &xpc_partitions[partid]; struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
enum xpc_retval ret; enum xpc_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
partid, ch_number); partid, ch_number);
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
...@@ -2091,11 +1974,10 @@ xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, ...@@ -2091,11 +1974,10 @@ xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
DBUG_ON(func == NULL); DBUG_ON(func == NULL);
ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
func, key); func, key);
return ret; return ret;
} }
static struct xpc_msg * static struct xpc_msg *
xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
{ {
...@@ -2105,7 +1987,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) ...@@ -2105,7 +1987,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
u64 msg_offset; u64 msg_offset;
enum xpc_retval ret; enum xpc_retval ret;
if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
/* we were interrupted by a signal */ /* we were interrupted by a signal */
return NULL; return NULL;
...@@ -2117,23 +1998,21 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) ...@@ -2117,23 +1998,21 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
msg_index = ch->next_msg_to_pull % ch->remote_nentries; msg_index = ch->next_msg_to_pull % ch->remote_nentries;
DBUG_ON(ch->next_msg_to_pull >= DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
(volatile s64) ch->w_remote_GP.put); nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
nmsgs = (volatile s64) ch->w_remote_GP.put -
ch->next_msg_to_pull;
if (msg_index + nmsgs > ch->remote_nentries) { if (msg_index + nmsgs > ch->remote_nentries) {
/* ignore the ones that wrap the msg queue for now */ /* ignore the ones that wrap the msg queue for now */
nmsgs = ch->remote_nentries - msg_index; nmsgs = ch->remote_nentries - msg_index;
} }
msg_offset = msg_index * ch->msg_size; msg_offset = msg_index * ch->msg_size;
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
msg_offset); remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + msg_offset);
msg_offset);
if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
nmsgs * ch->msg_size)) != xpcSuccess) { nmsgs * ch->msg_size);
if (ret != xpcSuccess) {
dev_dbg(xpc_chan, "failed to pull %d msgs starting with" dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
" msg %ld from partition %d, channel=%d, " " msg %ld from partition %d, channel=%d, "
...@@ -2146,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) ...@@ -2146,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
return NULL; return NULL;
} }
mb(); /* >>> this may not be needed, we're not sure */
ch->next_msg_to_pull += nmsgs; ch->next_msg_to_pull += nmsgs;
} }
...@@ -2155,12 +2032,11 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) ...@@ -2155,12 +2032,11 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
/* return the message we were looking for */ /* return the message we were looking for */
msg_offset = (get % ch->remote_nentries) * ch->msg_size; msg_offset = (get % ch->remote_nentries) * ch->msg_size;
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
return msg; return msg;
} }
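
Both lookups above use the same slot arithmetic: message number N of a circular queue with nentries slots of msg_size bytes lives at offset (N % nentries) * msg_size from the queue base. A small self-contained sketch of that math (hypothetical struct layout):

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical miniature of the slot math: message N of a circular
	 * queue with 'nentries' slots of 'msg_size' bytes lives at offset
	 * (N % nentries) * msg_size from the cached queue base. */
	struct msg {
		int64_t number;
		char payload[56];
	};

	static struct msg *slot(char *base, int64_t n, int64_t nentries,
				size_t msg_size)
	{
		return (struct msg *)(base + (n % nentries) * msg_size);
	}

	int main(void)
	{
		char queue[8 * sizeof(struct msg)];

		/* message 10 of an 8-entry queue occupies slot 2 */
		printf("slot index: %lld\n",
		       (long long)(slot(queue, 10, 8, sizeof(struct msg)) -
				   (struct msg *)queue));
		return 0;
	}
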
/* /*
* Get a message to be delivered. * Get a message to be delivered.
*/ */
...@@ -2170,23 +2046,21 @@ xpc_get_deliverable_msg(struct xpc_channel *ch) ...@@ -2170,23 +2046,21 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
struct xpc_msg *msg = NULL; struct xpc_msg *msg = NULL;
s64 get; s64 get;
do { do {
if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { if (ch->flags & XPC_C_DISCONNECTING)
break; break;
}
get = (volatile s64) ch->w_local_GP.get; get = ch->w_local_GP.get;
if (get == (volatile s64) ch->w_remote_GP.put) { rmb(); /* guarantee that .get loads before .put */
if (get == ch->w_remote_GP.put)
break; break;
}
/* There are messages waiting to be pulled and delivered. /* There are messages waiting to be pulled and delivered.
* We need to try to secure one for ourselves. We'll do this * We need to try to secure one for ourselves. We'll do this
* by trying to increment w_local_GP.get and hope that no one * by trying to increment w_local_GP.get and hope that no one
 * else beats us to it. If they do, we'll simply have  * else beats us to it. If they do, we'll simply have
* to try again for the next one. * to try again for the next one.
*/ */
if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
/* we got the entry referenced by get */ /* we got the entry referenced by get */
...@@ -2211,7 +2085,6 @@ xpc_get_deliverable_msg(struct xpc_channel *ch) ...@@ -2211,7 +2085,6 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
return msg; return msg;
} }
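
The loop above is a lock-free multi-consumer claim: read w_local_GP.get, order that read before the w_remote_GP.put read with rmb(), and advance get with cmpxchg() so that concurrent kthreads never claim the same message. A hedged userspace analogue, with GCC builtins in place of rmb() and cmpxchg():

	#include <stdio.h>
	#include <stdint.h>

	static int64_t local_get;	/* next message to claim */
	static int64_t remote_put = 3;	/* messages made available by the peer */

	/* Returns a claimed message number, or -1 if none are waiting.  Sketch
	 * only: the real loop also bails out when the channel disconnects. */
	static int64_t claim_one(void)
	{
		int64_t get;

		do {
			get = local_get;
			__sync_synchronize();	/* stands in for rmb() */
			if (get == remote_put)
				return -1;	/* queue drained */
		} while (__sync_val_compare_and_swap(&local_get, get, get + 1)
			 != get);		/* lost the race: retry */
		return get;
	}

	int main(void)
	{
		int64_t n;

		while ((n = claim_one()) >= 0)
			printf("claimed message %lld\n", (long long)n);
		return 0;
	}
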
/* /*
* Deliver a message to its intended recipient. * Deliver a message to its intended recipient.
*/ */
...@@ -2220,8 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch) ...@@ -2220,8 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
{ {
struct xpc_msg *msg; struct xpc_msg *msg;
msg = xpc_get_deliverable_msg(ch);
if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { if (msg != NULL) {
/* /*
* This ref is taken to protect the payload itself from being * This ref is taken to protect the payload itself from being
...@@ -2235,16 +2108,16 @@ xpc_deliver_msg(struct xpc_channel *ch) ...@@ -2235,16 +2108,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
if (ch->func != NULL) { if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n", "msg_number=%ld, partid=%d, channel=%d\n",
(void *) msg, msg->number, ch->partid, (void *)msg, msg->number, ch->partid,
ch->number); ch->number);
/* deliver the message to its intended recipient */ /* deliver the message to its intended recipient */
ch->func(xpcMsgReceived, ch->partid, ch->number, ch->func(xpcMsgReceived, ch->partid, ch->number,
&msg->payload, ch->key); &msg->payload, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n", "msg_number=%ld, partid=%d, channel=%d\n",
(void *) msg, msg->number, ch->partid, (void *)msg, msg->number, ch->partid,
ch->number); ch->number);
} }
...@@ -2252,7 +2125,6 @@ xpc_deliver_msg(struct xpc_channel *ch) ...@@ -2252,7 +2125,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
} }
} }
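
For context, the ch->func callout above follows XPC's delivery contract: the registerer's function receives xpcMsgReceived plus the payload, and is expected to acknowledge later via xpc_initiate_received(). A sketch of what a consumer-side handler might look like (all consumer-side names are illustrative):

	#include <stdio.h>

	/* Hypothetical consumer-side handler matching the callout above: XPC
	 * hands the payload to ch->func(), and the recipient acknowledges it
	 * later via xpc_initiate_received().  Names here are illustrative. */
	typedef void (*xpc_notify_cb)(int reason, short partid, int ch_number,
				      void *payload, void *key);

	static void my_msg_handler(int reason, short partid, int ch_number,
				   void *payload, void *key)
	{
		(void)reason;
		(void)key;
		printf("partition %d, channel %d delivered: %s\n",
		       partid, ch_number, (const char *)payload);
		/* a real handler would queue work, then call
		 * xpc_initiate_received(partid, ch_number, payload) when done */
	}

	int main(void)
	{
		char payload[] = "hello from the other SSI";
		xpc_notify_cb cb = my_msg_handler;

		cb(0 /* xpcMsgReceived */, 2, 0, payload, NULL);
		return 0;
	}
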
/* /*
* Now we actually acknowledge the messages that have been delivered and ack'd * Now we actually acknowledge the messages that have been delivered and ack'd
* by advancing the cached remote message queue's Get value and if requested * by advancing the cached remote message queue's Get value and if requested
...@@ -2265,20 +2137,18 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) ...@@ -2265,20 +2137,18 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
s64 get = initial_get + 1; s64 get = initial_get + 1;
int send_IPI = 0; int send_IPI = 0;
while (1) { while (1) {
while (1) { while (1) {
if (get == (volatile s64) ch->w_local_GP.get) { if (get == ch->w_local_GP.get)
break; break;
}
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
(get % ch->remote_nentries) * ch->msg_size); (get % ch->remote_nentries) *
ch->msg_size);
if (!(msg->flags & XPC_M_DONE)) { if (!(msg->flags & XPC_M_DONE))
break; break;
}
msg_flags |= msg->flags; msg_flags |= msg->flags;
get++; get++;
...@@ -2290,10 +2160,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) ...@@ -2290,10 +2160,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
} }
if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
initial_get) { initial_get) {
/* someone else beat us to it */ /* someone else beat us to it */
DBUG_ON((volatile s64) ch->local_GP->get <= DBUG_ON(ch->local_GP->get <= initial_get);
initial_get);
break; break;
} }
...@@ -2312,12 +2181,10 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) ...@@ -2312,12 +2181,10 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
initial_get = get; initial_get = get;
} }
if (send_IPI) { if (send_IPI)
xpc_IPI_send_msgrequest(ch); xpc_IPI_send_msgrequest(ch);
}
} }
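
The outer loop batches acknowledgements: it walks get forward across consecutive slots whose XPC_M_DONE flag is set, then publishes the new value once with cmpxchg_rel() (a release-ordered cmpxchg), retrying if another CPU advanced local_GP->get first. A reduced sketch of the batching step, with an assumed flag value:

	#include <stdio.h>
	#include <stdint.h>

	#define M_DONE 0x08	/* assumed value; mirrors XPC_M_DONE's role only */

	/* Walk 'get' forward over consecutive DONE slots; the caller then
	 * publishes the result once (the real code uses cmpxchg_rel() and
	 * retries if someone else advanced the pointer first). */
	static int64_t batch_ack(const uint8_t *flags, int64_t get, int64_t limit)
	{
		while (get < limit && (flags[get] & M_DONE))
			get++;
		return get;
	}

	int main(void)
	{
		uint8_t flags[6] = { M_DONE, M_DONE, 0, M_DONE, 0, 0 };

		printf("acked up to message %lld\n",
		       (long long)batch_ack(flags, 0, 6));
		return 0;
	}
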
/* /*
* Acknowledge receipt of a delivered message. * Acknowledge receipt of a delivered message.
* *
...@@ -2343,17 +2210,16 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload) ...@@ -2343,17 +2210,16 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
s64 get, msg_number = msg->number; s64 get, msg_number = msg->number;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number]; ch = &part->channels[ch_number];
dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
(void *) msg, msg_number, ch->partid, ch->number); (void *)msg, msg_number, ch->partid, ch->number);
DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
msg_number % ch->remote_nentries); msg_number % ch->remote_nentries);
DBUG_ON(msg->flags & XPC_M_DONE); DBUG_ON(msg->flags & XPC_M_DONE);
msg->flags |= XPC_M_DONE; msg->flags |= XPC_M_DONE;
...@@ -2369,11 +2235,9 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload) ...@@ -2369,11 +2235,9 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
* been delivered. * been delivered.
*/ */
get = ch->local_GP->get; get = ch->local_GP->get;
if (get == msg_number) { if (get == msg_number)
xpc_acknowledge_msgs(ch, get, msg->flags); xpc_acknowledge_msgs(ch, get, msg->flags);
}
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
xpc_msgqueue_deref(ch); xpc_msgqueue_deref(ch);
} }
...@@ -3,10 +3,9 @@ ...@@ -3,10 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/ */
/* /*
* Cross Partition Communication (XPC) support - standard version. * Cross Partition Communication (XPC) support - standard version.
* *
...@@ -44,23 +43,20 @@ ...@@ -44,23 +43,20 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <asm/sn/intr.h> #include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h> #include <asm/sn/sn_sal.h>
#include <asm/uaccess.h> #include "xpc.h"
#include <asm/sn/xpc.h>
/* define two XPC debug device structures to be used with dev_dbg() et al */ /* define two XPC debug device structures to be used with dev_dbg() et al */
...@@ -81,10 +77,8 @@ struct device xpc_chan_dbg_subname = { ...@@ -81,10 +77,8 @@ struct device xpc_chan_dbg_subname = {
struct device *xpc_part = &xpc_part_dbg_subname; struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname; struct device *xpc_chan = &xpc_chan_dbg_subname;
static int xpc_kdebug_ignore; static int xpc_kdebug_ignore;
/* systune related variables for /proc/sys directories */ /* systune related variables for /proc/sys directories */
static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
...@@ -96,61 +90,56 @@ static int xpc_hb_check_min_interval = 10; ...@@ -96,61 +90,56 @@ static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120; static int xpc_hb_check_max_interval = 120;
int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT; int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
static int xpc_disengage_request_min_timelimit = 0; static int xpc_disengage_request_min_timelimit; /* = 0 */
static int xpc_disengage_request_max_timelimit = 120; static int xpc_disengage_request_max_timelimit = 120;
static ctl_table xpc_sys_xpc_hb_dir[] = { static ctl_table xpc_sys_xpc_hb_dir[] = {
{ {
.ctl_name = CTL_UNNUMBERED, .ctl_name = CTL_UNNUMBERED,
.procname = "hb_interval", .procname = "hb_interval",
.data = &xpc_hb_interval, .data = &xpc_hb_interval,
.maxlen = sizeof(int), .maxlen = sizeof(int),
.mode = 0644, .mode = 0644,
.proc_handler = &proc_dointvec_minmax, .proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec, .strategy = &sysctl_intvec,
.extra1 = &xpc_hb_min_interval, .extra1 = &xpc_hb_min_interval,
.extra2 = &xpc_hb_max_interval .extra2 = &xpc_hb_max_interval},
},
{ {
.ctl_name = CTL_UNNUMBERED, .ctl_name = CTL_UNNUMBERED,
.procname = "hb_check_interval", .procname = "hb_check_interval",
.data = &xpc_hb_check_interval, .data = &xpc_hb_check_interval,
.maxlen = sizeof(int), .maxlen = sizeof(int),
.mode = 0644, .mode = 0644,
.proc_handler = &proc_dointvec_minmax, .proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec, .strategy = &sysctl_intvec,
.extra1 = &xpc_hb_check_min_interval, .extra1 = &xpc_hb_check_min_interval,
.extra2 = &xpc_hb_check_max_interval .extra2 = &xpc_hb_check_max_interval},
},
{} {}
}; };
static ctl_table xpc_sys_xpc_dir[] = { static ctl_table xpc_sys_xpc_dir[] = {
{ {
.ctl_name = CTL_UNNUMBERED, .ctl_name = CTL_UNNUMBERED,
.procname = "hb", .procname = "hb",
.mode = 0555, .mode = 0555,
.child = xpc_sys_xpc_hb_dir .child = xpc_sys_xpc_hb_dir},
},
{ {
.ctl_name = CTL_UNNUMBERED, .ctl_name = CTL_UNNUMBERED,
.procname = "disengage_request_timelimit", .procname = "disengage_request_timelimit",
.data = &xpc_disengage_request_timelimit, .data = &xpc_disengage_request_timelimit,
.maxlen = sizeof(int), .maxlen = sizeof(int),
.mode = 0644, .mode = 0644,
.proc_handler = &proc_dointvec_minmax, .proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec, .strategy = &sysctl_intvec,
.extra1 = &xpc_disengage_request_min_timelimit, .extra1 = &xpc_disengage_request_min_timelimit,
.extra2 = &xpc_disengage_request_max_timelimit .extra2 = &xpc_disengage_request_max_timelimit},
},
{} {}
}; };
static ctl_table xpc_sys_dir[] = { static ctl_table xpc_sys_dir[] = {
{ {
.ctl_name = CTL_UNNUMBERED, .ctl_name = CTL_UNNUMBERED,
.procname = "xpc", .procname = "xpc",
.mode = 0555, .mode = 0555,
.child = xpc_sys_xpc_dir .child = xpc_sys_xpc_dir},
},
{} {}
}; };
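
These nested ctl_table arrays publish /proc/sys/xpc/hb/hb_interval, /proc/sys/xpc/hb/hb_check_interval and /proc/sys/xpc/disengage_request_timelimit, each bounded by the extra1/extra2 pointers via proc_dointvec_minmax. A loose userspace model of that bounds check (behaviour simplified; the real handler enforces the range itself):

	#include <stdio.h>

	/* Loose model of a min/max-bounded sysctl write: values outside
	 * [min, max] are refused and the variable keeps its old value. */
	static int set_bounded(int *var, int val, int min, int max)
	{
		if (val < min || val > max)
			return -1;
		*var = val;
		return 0;
	}

	int main(void)
	{
		int hb_interval = 5;	/* plays the role of xpc_hb_interval */

		if (set_bounded(&hb_interval, 300, 1, 10) != 0)
			printf("300 rejected; hb_interval stays %d\n",
			       hb_interval);
		return 0;
	}
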
static struct ctl_table_header *xpc_sysctl; static struct ctl_table_header *xpc_sysctl;
...@@ -172,13 +161,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited); ...@@ -172,13 +161,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
/* notification that the xpc_discovery thread has exited */ /* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited); static DECLARE_COMPLETION(xpc_discovery_exited);
static struct timer_list xpc_hb_timer; static struct timer_list xpc_hb_timer;
static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = { static struct notifier_block xpc_reboot_notifier = {
.notifier_call = xpc_system_reboot, .notifier_call = xpc_system_reboot,
...@@ -189,25 +175,22 @@ static struct notifier_block xpc_die_notifier = { ...@@ -189,25 +175,22 @@ static struct notifier_block xpc_die_notifier = {
.notifier_call = xpc_system_die, .notifier_call = xpc_system_die,
}; };
/* /*
* Timer function to enforce the timelimit on the partition disengage request. * Timer function to enforce the timelimit on the partition disengage request.
*/ */
static void static void
xpc_timeout_partition_disengage_request(unsigned long data) xpc_timeout_partition_disengage_request(unsigned long data)
{ {
struct xpc_partition *part = (struct xpc_partition *) data; struct xpc_partition *part = (struct xpc_partition *)data;
DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
(void) xpc_partition_disengaged(part); (void)xpc_partition_disengaged(part);
DBUG_ON(part->disengage_request_timeout != 0); DBUG_ON(part->disengage_request_timeout != 0);
DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
} }
/* /*
* Notify the heartbeat check thread that an IRQ has been received. * Notify the heartbeat check thread that an IRQ has been received.
*/ */
...@@ -219,7 +202,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id) ...@@ -219,7 +202,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/* /*
 * Timer to produce the heartbeat. The timer structure's function is  * Timer to produce the heartbeat. The timer structure's function is
* already set when this is initially called. A tunable is used to * already set when this is initially called. A tunable is used to
...@@ -230,15 +212,13 @@ xpc_hb_beater(unsigned long dummy) ...@@ -230,15 +212,13 @@ xpc_hb_beater(unsigned long dummy)
{ {
xpc_vars->heartbeat++; xpc_vars->heartbeat++;
if (time_after_eq(jiffies, xpc_hb_check_timeout)) { if (time_after_eq(jiffies, xpc_hb_check_timeout))
wake_up_interruptible(&xpc_act_IRQ_wq); wake_up_interruptible(&xpc_act_IRQ_wq);
}
xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
add_timer(&xpc_hb_timer); add_timer(&xpc_hb_timer);
} }
/* /*
* This thread is responsible for nearly all of the partition * This thread is responsible for nearly all of the partition
* activation/deactivation. * activation/deactivation.
...@@ -248,27 +228,23 @@ xpc_hb_checker(void *ignore) ...@@ -248,27 +228,23 @@ xpc_hb_checker(void *ignore)
{ {
int last_IRQ_count = 0; int last_IRQ_count = 0;
int new_IRQ_count; int new_IRQ_count;
int force_IRQ=0; int force_IRQ = 0;
/* this thread was marked active by xpc_hb_init() */ /* this thread was marked active by xpc_hb_init() */
daemonize(XPC_HB_CHECK_THREAD_NAME);
set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
/* set our heartbeating to other partitions into motion */ /* set our heartbeating to other partitions into motion */
xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
xpc_hb_beater(0); xpc_hb_beater(0);
while (!(volatile int) xpc_exiting) { while (!xpc_exiting) {
dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
"been received\n", "been received\n",
(int) (xpc_hb_check_timeout - jiffies), (int)(xpc_hb_check_timeout - jiffies),
atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
/* checking of remote heartbeats is skewed by IRQ handling */ /* checking of remote heartbeats is skewed by IRQ handling */
if (time_after_eq(jiffies, xpc_hb_check_timeout)) { if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
dev_dbg(xpc_part, "checking remote heartbeats\n"); dev_dbg(xpc_part, "checking remote heartbeats\n");
...@@ -282,7 +258,6 @@ xpc_hb_checker(void *ignore) ...@@ -282,7 +258,6 @@ xpc_hb_checker(void *ignore)
force_IRQ = 1; force_IRQ = 1;
} }
/* check for outstanding IRQs */ /* check for outstanding IRQs */
new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
...@@ -294,30 +269,30 @@ xpc_hb_checker(void *ignore) ...@@ -294,30 +269,30 @@ xpc_hb_checker(void *ignore)
last_IRQ_count += xpc_identify_act_IRQ_sender(); last_IRQ_count += xpc_identify_act_IRQ_sender();
if (last_IRQ_count < new_IRQ_count) { if (last_IRQ_count < new_IRQ_count) {
/* retry once to help avoid missing AMO */ /* retry once to help avoid missing AMO */
(void) xpc_identify_act_IRQ_sender(); (void)xpc_identify_act_IRQ_sender();
} }
last_IRQ_count = new_IRQ_count; last_IRQ_count = new_IRQ_count;
xpc_hb_check_timeout = jiffies + xpc_hb_check_timeout = jiffies +
(xpc_hb_check_interval * HZ); (xpc_hb_check_interval * HZ);
} }
/* wait for IRQ or timeout */ /* wait for IRQ or timeout */
(void) wait_event_interruptible(xpc_act_IRQ_wq, (void)wait_event_interruptible(xpc_act_IRQ_wq,
(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || (last_IRQ_count <
time_after_eq(jiffies, xpc_hb_check_timeout) || atomic_read(&xpc_act_IRQ_rcvd)
(volatile int) xpc_exiting)); || time_after_eq(jiffies,
xpc_hb_check_timeout) ||
xpc_exiting));
} }
dev_dbg(xpc_part, "heartbeat checker is exiting\n"); dev_dbg(xpc_part, "heartbeat checker is exiting\n");
/* mark this thread as having exited */ /* mark this thread as having exited */
complete(&xpc_hb_checker_exited); complete(&xpc_hb_checker_exited);
return 0; return 0;
} }
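
Two timescales cooperate here: xpc_hb_beater() re-arms its own timer every xpc_hb_interval seconds to bump the local heartbeat, while the checker thread scans remote heartbeats only once per xpc_hb_check_interval (or sooner, when an activate IRQ arrives). A toy model of that split, with illustrative tick values:

	#include <stdio.h>

	/* Toy model of the beater/checker split: the beater bumps a counter
	 * every 'interval' ticks, the checker scans remote heartbeats only
	 * once per 'check_interval' ticks.  All values are illustrative. */
	int main(void)
	{
		unsigned long jiffies, heartbeat = 0;
		unsigned long interval = 5, check_interval = 20;
		unsigned long next_beat = 0, next_check = check_interval;

		for (jiffies = 0; jiffies <= 40; jiffies++) {
			if (jiffies >= next_beat) {
				heartbeat++;	/* xpc_hb_beater()'s job */
				next_beat = jiffies + interval;
			}
			if (jiffies >= next_check) {
				printf("tick %lu: checking remote heartbeats "
				       "(local beat count %lu)\n",
				       jiffies, heartbeat);
				next_check = jiffies + check_interval;
			}
		}
		return 0;
	}
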
/* /*
* This thread will attempt to discover other partitions to activate * This thread will attempt to discover other partitions to activate
* based on info provided by SAL. This new thread is short lived and * based on info provided by SAL. This new thread is short lived and
...@@ -326,8 +301,6 @@ xpc_hb_checker(void *ignore) ...@@ -326,8 +301,6 @@ xpc_hb_checker(void *ignore)
static int static int
xpc_initiate_discovery(void *ignore) xpc_initiate_discovery(void *ignore)
{ {
daemonize(XPC_DISCOVERY_THREAD_NAME);
xpc_discovery(); xpc_discovery();
dev_dbg(xpc_part, "discovery thread is exiting\n"); dev_dbg(xpc_part, "discovery thread is exiting\n");
...@@ -337,7 +310,6 @@ xpc_initiate_discovery(void *ignore) ...@@ -337,7 +310,6 @@ xpc_initiate_discovery(void *ignore)
return 0; return 0;
} }
/* /*
 * Establish first contact with the remote partition. This involves pulling  * Establish first contact with the remote partition. This involves pulling
* the XPC per partition variables from the remote partition and waiting for * the XPC per partition variables from the remote partition and waiting for
...@@ -348,7 +320,6 @@ xpc_make_first_contact(struct xpc_partition *part) ...@@ -348,7 +320,6 @@ xpc_make_first_contact(struct xpc_partition *part)
{ {
enum xpc_retval ret; enum xpc_retval ret;
while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
if (ret != xpcRetry) { if (ret != xpcRetry) {
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
...@@ -359,17 +330,15 @@ xpc_make_first_contact(struct xpc_partition *part) ...@@ -359,17 +330,15 @@ xpc_make_first_contact(struct xpc_partition *part)
"partition %d\n", XPC_PARTID(part)); "partition %d\n", XPC_PARTID(part));
/* wait a 1/4 of a second or so */ /* wait a 1/4 of a second or so */
(void) msleep_interruptible(250); (void)msleep_interruptible(250);
if (part->act_state == XPC_P_DEACTIVATING) { if (part->act_state == XPC_P_DEACTIVATING)
return part->reason; return part->reason;
}
} }
return xpc_mark_partition_active(part); return xpc_mark_partition_active(part);
} }
/* /*
* The first kthread assigned to a newly activated partition is the one * The first kthread assigned to a newly activated partition is the one
* created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
...@@ -386,12 +355,11 @@ static void ...@@ -386,12 +355,11 @@ static void
xpc_channel_mgr(struct xpc_partition *part) xpc_channel_mgr(struct xpc_partition *part)
{ {
while (part->act_state != XPC_P_DEACTIVATING || while (part->act_state != XPC_P_DEACTIVATING ||
atomic_read(&part->nchannels_active) > 0 || atomic_read(&part->nchannels_active) > 0 ||
!xpc_partition_disengaged(part)) { !xpc_partition_disengaged(part)) {
xpc_process_channel_activity(part); xpc_process_channel_activity(part);
/* /*
* Wait until we've been requested to activate kthreads or * Wait until we've been requested to activate kthreads or
* all of the channel's message queues have been torn down or * all of the channel's message queues have been torn down or
...@@ -406,21 +374,16 @@ xpc_channel_mgr(struct xpc_partition *part) ...@@ -406,21 +374,16 @@ xpc_channel_mgr(struct xpc_partition *part)
 * wake it up.  * wake it up.
*/ */
atomic_dec(&part->channel_mgr_requests); atomic_dec(&part->channel_mgr_requests);
(void) wait_event_interruptible(part->channel_mgr_wq, (void)wait_event_interruptible(part->channel_mgr_wq,
(atomic_read(&part->channel_mgr_requests) > 0 || (atomic_read(&part->channel_mgr_requests) > 0 ||
(volatile u64) part->local_IPI_amo != 0 || part->local_IPI_amo != 0 ||
((volatile u8) part->act_state == (part->act_state == XPC_P_DEACTIVATING &&
XPC_P_DEACTIVATING && atomic_read(&part->nchannels_active) == 0 &&
atomic_read(&part->nchannels_active) == 0 && xpc_partition_disengaged(part))));
xpc_partition_disengaged(part))));
atomic_set(&part->channel_mgr_requests, 1); atomic_set(&part->channel_mgr_requests, 1);
// >>> Does it need to wakeup periodically as well? In case we
// >>> miscalculated the #of kthreads to wakeup or create?
} }
} }
/* /*
* When XPC HB determines that a partition has come up, it will create a new * When XPC HB determines that a partition has come up, it will create a new
* kthread and that kthread will call this function to attempt to set up the * kthread and that kthread will call this function to attempt to set up the
...@@ -443,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part) ...@@ -443,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part)
dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
if (xpc_setup_infrastructure(part) != xpcSuccess) { if (xpc_setup_infrastructure(part) != xpcSuccess)
return; return;
}
/* /*
* The kthread that XPC HB called us with will become the * The kthread that XPC HB called us with will become the
...@@ -454,27 +416,22 @@ xpc_partition_up(struct xpc_partition *part) ...@@ -454,27 +416,22 @@ xpc_partition_up(struct xpc_partition *part)
* has been dismantled. * has been dismantled.
*/ */
(void) xpc_part_ref(part); /* this will always succeed */ (void)xpc_part_ref(part); /* this will always succeed */
if (xpc_make_first_contact(part) == xpcSuccess) { if (xpc_make_first_contact(part) == xpcSuccess)
xpc_channel_mgr(part); xpc_channel_mgr(part);
}
xpc_part_deref(part); xpc_part_deref(part);
xpc_teardown_infrastructure(part); xpc_teardown_infrastructure(part);
} }
static int static int
xpc_activating(void *__partid) xpc_activating(void *__partid)
{ {
partid_t partid = (u64) __partid; partid_t partid = (u64)__partid;
struct xpc_partition *part = &xpc_partitions[partid]; struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags; unsigned long irq_flags;
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
...@@ -496,21 +453,6 @@ xpc_activating(void *__partid) ...@@ -496,21 +453,6 @@ xpc_activating(void *__partid)
dev_dbg(xpc_part, "bringing partition %d up\n", partid); dev_dbg(xpc_part, "bringing partition %d up\n", partid);
daemonize("xpc%02d", partid);
/*
* This thread needs to run at a realtime priority to prevent a
* significant performance degradation.
*/
ret = sched_setscheduler(current, SCHED_FIFO, &param);
if (ret != 0) {
dev_warn(xpc_part, "unable to set pid %d to a realtime "
"priority, ret=%d\n", current->pid, ret);
}
/* allow this thread and its children to run on any CPU */
set_cpus_allowed(current, CPU_MASK_ALL);
/* /*
* Register the remote partition's AMOs with SAL so it can handle * Register the remote partition's AMOs with SAL so it can handle
* and cleanup errors within that address range should the remote * and cleanup errors within that address range should the remote
...@@ -522,9 +464,9 @@ xpc_activating(void *__partid) ...@@ -522,9 +464,9 @@ xpc_activating(void *__partid)
* reloads and system reboots. * reloads and system reboots.
*/ */
if (sn_register_xp_addr_region(part->remote_amos_page_pa, if (sn_register_xp_addr_region(part->remote_amos_page_pa,
PAGE_SIZE, 1) < 0) { PAGE_SIZE, 1) < 0) {
dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
"xp_addr region\n", partid); "xp_addr region\n", partid);
spin_lock_irqsave(&part->act_lock, irq_flags); spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE; part->act_state = XPC_P_INACTIVE;
...@@ -537,12 +479,11 @@ xpc_activating(void *__partid) ...@@ -537,12 +479,11 @@ xpc_activating(void *__partid)
xpc_allow_hb(partid, xpc_vars); xpc_allow_hb(partid, xpc_vars);
xpc_IPI_send_activated(part); xpc_IPI_send_activated(part);
/* /*
* xpc_partition_up() holds this thread and marks this partition as * xpc_partition_up() holds this thread and marks this partition as
* XPC_P_ACTIVE by calling xpc_hb_mark_active(). * XPC_P_ACTIVE by calling xpc_hb_mark_active().
*/ */
(void) xpc_partition_up(part); (void)xpc_partition_up(part);
xpc_disallow_hb(partid, xpc_vars); xpc_disallow_hb(partid, xpc_vars);
xpc_mark_partition_inactive(part); xpc_mark_partition_inactive(part);
...@@ -555,14 +496,12 @@ xpc_activating(void *__partid) ...@@ -555,14 +496,12 @@ xpc_activating(void *__partid)
return 0; return 0;
} }
void void
xpc_activate_partition(struct xpc_partition *part) xpc_activate_partition(struct xpc_partition *part)
{ {
partid_t partid = XPC_PARTID(part); partid_t partid = XPC_PARTID(part);
unsigned long irq_flags; unsigned long irq_flags;
pid_t pid; struct task_struct *kthread;
spin_lock_irqsave(&part->act_lock, irq_flags); spin_lock_irqsave(&part->act_lock, irq_flags);
...@@ -573,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part) ...@@ -573,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part)
spin_unlock_irqrestore(&part->act_lock, irq_flags); spin_unlock_irqrestore(&part->act_lock, irq_flags);
pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
partid);
if (unlikely(pid <= 0)) { if (IS_ERR(kthread)) {
spin_lock_irqsave(&part->act_lock, irq_flags); spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE; part->act_state = XPC_P_INACTIVE;
XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
...@@ -583,12 +522,11 @@ xpc_activate_partition(struct xpc_partition *part) ...@@ -583,12 +522,11 @@ xpc_activate_partition(struct xpc_partition *part)
} }
} }
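
The switch from kernel_thread() to kthread_run() also changes the error convention checked above: instead of a pid that is <= 0 on failure, kthread_run() returns a task_struct pointer that encodes a negative errno, tested with IS_ERR(). A minimal model of that ERR_PTR/IS_ERR encoding:

	#include <stdio.h>

	/* Minimal model of the kernel's ERR_PTR/IS_ERR convention used by
	 * kthread_run(): a failure travels inside the returned pointer as a
	 * negative errno in the top page of the address space. */
	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *kthread = ERR_PTR(-12);	/* as if kthread_run() -> -ENOMEM */

		if (IS_ERR(kthread))
			printf("thread creation failed, err=%ld\n",
			       (long)kthread);
		return 0;
	}
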
/* /*
* Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
* partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
* than one partition, we use an AMO_t structure per partition to indicate * than one partition, we use an AMO_t structure per partition to indicate
* whether a partition has sent an IPI or not. >>> If it has, then wake up the * whether a partition has sent an IPI or not. If it has, then wake up the
* associated kthread to handle it. * associated kthread to handle it.
* *
* All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
...@@ -603,10 +541,9 @@ xpc_activate_partition(struct xpc_partition *part) ...@@ -603,10 +541,9 @@ xpc_activate_partition(struct xpc_partition *part)
irqreturn_t irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id) xpc_notify_IRQ_handler(int irq, void *dev_id)
{ {
partid_t partid = (partid_t) (u64) dev_id; partid_t partid = (partid_t) (u64)dev_id;
struct xpc_partition *part = &xpc_partitions[partid]; struct xpc_partition *part = &xpc_partitions[partid];
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
if (xpc_part_ref(part)) { if (xpc_part_ref(part)) {
...@@ -617,7 +554,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id) ...@@ -617,7 +554,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/* /*
* Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
* because the write to their associated IPI amo completed after the IRQ/IPI * because the write to their associated IPI amo completed after the IRQ/IPI
...@@ -630,13 +566,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part) ...@@ -630,13 +566,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
xpc_check_for_channel_activity(part); xpc_check_for_channel_activity(part);
part->dropped_IPI_timer.expires = jiffies + part->dropped_IPI_timer.expires = jiffies +
XPC_P_DROPPED_IPI_WAIT; XPC_P_DROPPED_IPI_WAIT;
add_timer(&part->dropped_IPI_timer); add_timer(&part->dropped_IPI_timer);
xpc_part_deref(part); xpc_part_deref(part);
} }
} }
void void
xpc_activate_kthreads(struct xpc_channel *ch, int needed) xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{ {
...@@ -644,7 +579,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) ...@@ -644,7 +579,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
int assigned = atomic_read(&ch->kthreads_assigned); int assigned = atomic_read(&ch->kthreads_assigned);
int wakeup; int wakeup;
DBUG_ON(needed <= 0); DBUG_ON(needed <= 0);
if (idle > 0) { if (idle > 0) {
...@@ -658,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) ...@@ -658,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
wake_up_nr(&ch->idle_wq, wakeup); wake_up_nr(&ch->idle_wq, wakeup);
} }
if (needed <= 0) { if (needed <= 0)
return; return;
}
if (needed + assigned > ch->kthreads_assigned_limit) { if (needed + assigned > ch->kthreads_assigned_limit) {
needed = ch->kthreads_assigned_limit - assigned; needed = ch->kthreads_assigned_limit - assigned;
// >>>should never be less than 0 if (needed <= 0)
if (needed <= 0) {
return; return;
}
} }
dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
...@@ -676,7 +607,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) ...@@ -676,7 +607,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
xpc_create_kthreads(ch, needed, 0); xpc_create_kthreads(ch, needed, 0);
} }
/* /*
* This function is where XPC's kthreads wait for messages to deliver. * This function is where XPC's kthreads wait for messages to deliver.
*/ */
...@@ -686,15 +616,13 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) ...@@ -686,15 +616,13 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
do { do {
/* deliver messages to their intended recipients */ /* deliver messages to their intended recipients */
while ((volatile s64) ch->w_local_GP.get < while (ch->w_local_GP.get < ch->w_remote_GP.put &&
(volatile s64) ch->w_remote_GP.put && !(ch->flags & XPC_C_DISCONNECTING)) {
!((volatile u32) ch->flags &
XPC_C_DISCONNECTING)) {
xpc_deliver_msg(ch); xpc_deliver_msg(ch);
} }
if (atomic_inc_return(&ch->kthreads_idle) > if (atomic_inc_return(&ch->kthreads_idle) >
ch->kthreads_idle_limit) { ch->kthreads_idle_limit) {
/* too many idle kthreads on this channel */ /* too many idle kthreads on this channel */
atomic_dec(&ch->kthreads_idle); atomic_dec(&ch->kthreads_idle);
break; break;
...@@ -703,20 +631,17 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) ...@@ -703,20 +631,17 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
dev_dbg(xpc_chan, "idle kthread calling " dev_dbg(xpc_chan, "idle kthread calling "
"wait_event_interruptible_exclusive()\n"); "wait_event_interruptible_exclusive()\n");
(void) wait_event_interruptible_exclusive(ch->idle_wq, (void)wait_event_interruptible_exclusive(ch->idle_wq,
((volatile s64) ch->w_local_GP.get < (ch->w_local_GP.get < ch->w_remote_GP.put ||
(volatile s64) ch->w_remote_GP.put || (ch->flags & XPC_C_DISCONNECTING)));
((volatile u32) ch->flags &
XPC_C_DISCONNECTING)));
atomic_dec(&ch->kthreads_idle); atomic_dec(&ch->kthreads_idle);
} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); } while (!(ch->flags & XPC_C_DISCONNECTING));
} }
static int static int
xpc_daemonize_kthread(void *args) xpc_kthread_start(void *args)
{ {
partid_t partid = XPC_UNPACK_ARG1(args); partid_t partid = XPC_UNPACK_ARG1(args);
u16 ch_number = XPC_UNPACK_ARG2(args); u16 ch_number = XPC_UNPACK_ARG2(args);
...@@ -725,9 +650,6 @@ xpc_daemonize_kthread(void *args) ...@@ -725,9 +650,6 @@ xpc_daemonize_kthread(void *args)
int n_needed; int n_needed;
unsigned long irq_flags; unsigned long irq_flags;
daemonize("xpc%02dc%d", partid, ch_number);
dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
partid, ch_number); partid, ch_number);
...@@ -756,10 +678,9 @@ xpc_daemonize_kthread(void *args) ...@@ -756,10 +678,9 @@ xpc_daemonize_kthread(void *args)
* need one less than total #of messages to deliver. * need one less than total #of messages to deliver.
*/ */
n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
if (n_needed > 0 && if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
!(ch->flags & XPC_C_DISCONNECTING)) {
xpc_activate_kthreads(ch, n_needed); xpc_activate_kthreads(ch, n_needed);
}
} else { } else {
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
} }
...@@ -771,7 +692,7 @@ xpc_daemonize_kthread(void *args) ...@@ -771,7 +692,7 @@ xpc_daemonize_kthread(void *args)
spin_lock_irqsave(&ch->lock, irq_flags); spin_lock_irqsave(&ch->lock, irq_flags);
if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
ch->flags |= XPC_C_DISCONNECTINGCALLOUT; ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
...@@ -798,7 +719,6 @@ xpc_daemonize_kthread(void *args) ...@@ -798,7 +719,6 @@ xpc_daemonize_kthread(void *args)
return 0; return 0;
} }
/* /*
* For each partition that XPC has established communications with, there is * For each partition that XPC has established communications with, there is
* a minimum of one kernel thread assigned to perform any operation that * a minimum of one kernel thread assigned to perform any operation that
...@@ -813,13 +733,12 @@ xpc_daemonize_kthread(void *args) ...@@ -813,13 +733,12 @@ xpc_daemonize_kthread(void *args)
*/ */
void void
xpc_create_kthreads(struct xpc_channel *ch, int needed, xpc_create_kthreads(struct xpc_channel *ch, int needed,
int ignore_disconnecting) int ignore_disconnecting)
{ {
unsigned long irq_flags; unsigned long irq_flags;
pid_t pid;
u64 args = XPC_PACK_ARGS(ch->partid, ch->number); u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_partition *part = &xpc_partitions[ch->partid];
struct task_struct *kthread;
while (needed-- > 0) { while (needed-- > 0) {
...@@ -832,7 +751,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, ...@@ -832,7 +751,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
/* kthreads assigned had gone to zero */ /* kthreads assigned had gone to zero */
BUG_ON(!(ch->flags & BUG_ON(!(ch->flags &
XPC_C_DISCONNECTINGCALLOUT_MADE)); XPC_C_DISCONNECTINGCALLOUT_MADE));
break; break;
} }
...@@ -843,11 +762,12 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, ...@@ -843,11 +762,12 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
if (atomic_inc_return(&part->nchannels_engaged) == 1) if (atomic_inc_return(&part->nchannels_engaged) == 1)
xpc_mark_partition_engaged(part); xpc_mark_partition_engaged(part);
} }
(void) xpc_part_ref(part); (void)xpc_part_ref(part);
xpc_msgqueue_ref(ch); xpc_msgqueue_ref(ch);
pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); kthread = kthread_run(xpc_kthread_start, (void *)args,
if (pid < 0) { "xpc%02dc%d", ch->partid, ch->number);
if (IS_ERR(kthread)) {
/* the fork failed */ /* the fork failed */
/* /*
...@@ -857,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, ...@@ -857,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
* to this channel are blocked in the channel's * to this channel are blocked in the channel's
* registerer, because the only thing that will unblock * registerer, because the only thing that will unblock
* them is the xpcDisconnecting callout that this * them is the xpcDisconnecting callout that this
* failed kernel_thread would have made. * failed kthread_run() would have made.
*/ */
if (atomic_dec_return(&ch->kthreads_assigned) == 0 && if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
...@@ -869,7 +789,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, ...@@ -869,7 +789,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
xpc_part_deref(part); xpc_part_deref(part);
if (atomic_read(&ch->kthreads_assigned) < if (atomic_read(&ch->kthreads_assigned) <
ch->kthreads_idle_limit) { ch->kthreads_idle_limit) {
/* /*
* Flag this as an error only if we have an * Flag this as an error only if we have an
* insufficient #of kthreads for the channel * insufficient #of kthreads for the channel
...@@ -877,17 +797,14 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, ...@@ -877,17 +797,14 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
*/ */
spin_lock_irqsave(&ch->lock, irq_flags); spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
&irq_flags); &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
} }
break; break;
} }
ch->kthreads_created++; // >>> temporary debug only!!!
} }
} }
void void
xpc_disconnect_wait(int ch_number) xpc_disconnect_wait(int ch_number)
{ {
...@@ -897,14 +814,12 @@ xpc_disconnect_wait(int ch_number) ...@@ -897,14 +814,12 @@ xpc_disconnect_wait(int ch_number)
struct xpc_channel *ch; struct xpc_channel *ch;
int wakeup_channel_mgr; int wakeup_channel_mgr;
/* now wait for all callouts to the caller's function to cease */ /* now wait for all callouts to the caller's function to cease */
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
if (!xpc_part_ref(part)) { if (!xpc_part_ref(part))
continue; continue;
}
ch = &part->channels[ch_number]; ch = &part->channels[ch_number];
...@@ -923,7 +838,8 @@ xpc_disconnect_wait(int ch_number) ...@@ -923,7 +838,8 @@ xpc_disconnect_wait(int ch_number)
if (part->act_state != XPC_P_DEACTIVATING) { if (part->act_state != XPC_P_DEACTIVATING) {
spin_lock(&part->IPI_lock); spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo, XPC_SET_IPI_FLAGS(part->local_IPI_amo,
ch->number, ch->delayed_IPI_flags); ch->number,
ch->delayed_IPI_flags);
spin_unlock(&part->IPI_lock); spin_unlock(&part->IPI_lock);
wakeup_channel_mgr = 1; wakeup_channel_mgr = 1;
} }
...@@ -933,15 +849,13 @@ xpc_disconnect_wait(int ch_number) ...@@ -933,15 +849,13 @@ xpc_disconnect_wait(int ch_number)
ch->flags &= ~XPC_C_WDISCONNECT; ch->flags &= ~XPC_C_WDISCONNECT;
spin_unlock_irqrestore(&ch->lock, irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags);
if (wakeup_channel_mgr) { if (wakeup_channel_mgr)
xpc_wakeup_channel_mgr(part); xpc_wakeup_channel_mgr(part);
}
xpc_part_deref(part); xpc_part_deref(part);
} }
} }
static void static void
xpc_do_exit(enum xpc_retval reason) xpc_do_exit(enum xpc_retval reason)
{ {
...@@ -950,7 +864,6 @@ xpc_do_exit(enum xpc_retval reason) ...@@ -950,7 +864,6 @@ xpc_do_exit(enum xpc_retval reason)
struct xpc_partition *part; struct xpc_partition *part;
unsigned long printmsg_time, disengage_request_timeout = 0; unsigned long printmsg_time, disengage_request_timeout = 0;
/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
DBUG_ON(xpc_exiting == 1); DBUG_ON(xpc_exiting == 1);
...@@ -971,10 +884,8 @@ xpc_do_exit(enum xpc_retval reason) ...@@ -971,10 +884,8 @@ xpc_do_exit(enum xpc_retval reason)
/* wait for the heartbeat checker thread to exit */ /* wait for the heartbeat checker thread to exit */
wait_for_completion(&xpc_hb_checker_exited); wait_for_completion(&xpc_hb_checker_exited);
/* sleep for a 1/3 of a second or so */ /* sleep for a 1/3 of a second or so */
(void) msleep_interruptible(300); (void)msleep_interruptible(300);
/* wait for all partitions to become inactive */ /* wait for all partitions to become inactive */
...@@ -988,7 +899,7 @@ xpc_do_exit(enum xpc_retval reason) ...@@ -988,7 +899,7 @@ xpc_do_exit(enum xpc_retval reason)
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
if (xpc_partition_disengaged(part) && if (xpc_partition_disengaged(part) &&
part->act_state == XPC_P_INACTIVE) { part->act_state == XPC_P_INACTIVE) {
continue; continue;
} }
...@@ -997,47 +908,46 @@ xpc_do_exit(enum xpc_retval reason) ...@@ -997,47 +908,46 @@ xpc_do_exit(enum xpc_retval reason)
XPC_DEACTIVATE_PARTITION(part, reason); XPC_DEACTIVATE_PARTITION(part, reason);
if (part->disengage_request_timeout > if (part->disengage_request_timeout >
disengage_request_timeout) { disengage_request_timeout) {
disengage_request_timeout = disengage_request_timeout =
part->disengage_request_timeout; part->disengage_request_timeout;
} }
} }
if (xpc_partition_engaged(-1UL)) { if (xpc_partition_engaged(-1UL)) {
if (time_after(jiffies, printmsg_time)) { if (time_after(jiffies, printmsg_time)) {
dev_info(xpc_part, "waiting for remote " dev_info(xpc_part, "waiting for remote "
"partitions to disengage, timeout in " "partitions to disengage, timeout in "
"%ld seconds\n", "%ld seconds\n",
(disengage_request_timeout - jiffies) (disengage_request_timeout - jiffies)
/ HZ); / HZ);
printmsg_time = jiffies + printmsg_time = jiffies +
(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
printed_waiting_msg = 1; printed_waiting_msg = 1;
} }
} else if (active_part_count > 0) { } else if (active_part_count > 0) {
if (printed_waiting_msg) { if (printed_waiting_msg) {
dev_info(xpc_part, "waiting for local partition" dev_info(xpc_part, "waiting for local partition"
" to disengage\n"); " to disengage\n");
printed_waiting_msg = 0; printed_waiting_msg = 0;
} }
} else { } else {
if (!xpc_disengage_request_timedout) { if (!xpc_disengage_request_timedout) {
dev_info(xpc_part, "all partitions have " dev_info(xpc_part, "all partitions have "
"disengaged\n"); "disengaged\n");
} }
break; break;
} }
/* sleep for a 1/3 of a second or so */ /* sleep for a 1/3 of a second or so */
(void) msleep_interruptible(300); (void)msleep_interruptible(300);
} while (1); } while (1);
DBUG_ON(xpc_partition_engaged(-1UL)); DBUG_ON(xpc_partition_engaged(-1UL));
/* indicate to others that our reserved page is uninitialized */ /* indicate to others that our reserved page is uninitialized */
xpc_rsvd_page->vars_pa = 0; xpc_rsvd_page->vars_pa = 0;
...@@ -1047,27 +957,24 @@ xpc_do_exit(enum xpc_retval reason) ...@@ -1047,27 +957,24 @@ xpc_do_exit(enum xpc_retval reason)
if (reason == xpcUnloading) { if (reason == xpcUnloading) {
/* take ourselves off of the reboot_notifier_list */ /* take ourselves off of the reboot_notifier_list */
(void) unregister_reboot_notifier(&xpc_reboot_notifier); (void)unregister_reboot_notifier(&xpc_reboot_notifier);
/* take ourselves off of the die_notifier list */ /* take ourselves off of the die_notifier list */
(void) unregister_die_notifier(&xpc_die_notifier); (void)unregister_die_notifier(&xpc_die_notifier);
} }
/* close down protections for IPI operations */ /* close down protections for IPI operations */
xpc_restrict_IPI_ops(); xpc_restrict_IPI_ops();
/* clear the interface to XPC's functions */ /* clear the interface to XPC's functions */
xpc_clear_interface(); xpc_clear_interface();
if (xpc_sysctl) { if (xpc_sysctl)
unregister_sysctl_table(xpc_sysctl); unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base); kfree(xpc_remote_copy_buffer_base);
} }
/* /*
* This function is called when the system is being rebooted. * This function is called when the system is being rebooted.
*/ */
...@@ -1076,7 +983,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) ...@@ -1076,7 +983,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{ {
enum xpc_retval reason; enum xpc_retval reason;
switch (event) { switch (event) {
case SYS_RESTART: case SYS_RESTART:
reason = xpcSystemReboot; reason = xpcSystemReboot;
...@@ -1095,7 +1001,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) ...@@ -1095,7 +1001,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
return NOTIFY_DONE; return NOTIFY_DONE;
} }
/* /*
* Notify other partitions to disengage from all references to our memory. * Notify other partitions to disengage from all references to our memory.
*/ */
...@@ -1107,17 +1012,16 @@ xpc_die_disengage(void) ...@@ -1107,17 +1012,16 @@ xpc_die_disengage(void)
unsigned long engaged; unsigned long engaged;
long time, printmsg_time, disengage_request_timeout; long time, printmsg_time, disengage_request_timeout;
/* keep xpc_hb_checker thread from doing anything (just in case) */ /* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1; xpc_exiting = 1;
xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
remote_vars_version)) { remote_vars_version)) {
/* just in case it was left set by an earlier XPC */ /* just in case it was left set by an earlier XPC */
xpc_clear_partition_engaged(1UL << partid); xpc_clear_partition_engaged(1UL << partid);
...@@ -1125,7 +1029,7 @@ xpc_die_disengage(void) ...@@ -1125,7 +1029,7 @@ xpc_die_disengage(void)
} }
if (xpc_partition_engaged(1UL << partid) || if (xpc_partition_engaged(1UL << partid) ||
part->act_state != XPC_P_INACTIVE) { part->act_state != XPC_P_INACTIVE) {
xpc_request_partition_disengage(part); xpc_request_partition_disengage(part);
xpc_mark_partition_disengaged(part); xpc_mark_partition_disengaged(part);
xpc_IPI_send_disengage(part); xpc_IPI_send_disengage(part);
...@@ -1134,9 +1038,9 @@ xpc_die_disengage(void) ...@@ -1134,9 +1038,9 @@ xpc_die_disengage(void)
time = rtc_time(); time = rtc_time();
printmsg_time = time + printmsg_time = time +
(XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
disengage_request_timeout = time + disengage_request_timeout = time +
(xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
/* wait for all other partitions to disengage from us */ /* wait for all other partitions to disengage from us */
...@@ -1152,8 +1056,8 @@ xpc_die_disengage(void) ...@@ -1152,8 +1056,8 @@ xpc_die_disengage(void)
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
if (engaged & (1UL << partid)) { if (engaged & (1UL << partid)) {
dev_info(xpc_part, "disengage from " dev_info(xpc_part, "disengage from "
"remote partition %d timed " "remote partition %d timed "
"out\n", partid); "out\n", partid);
} }
} }
break; break;
...@@ -1161,17 +1065,16 @@ xpc_die_disengage(void) ...@@ -1161,17 +1065,16 @@ xpc_die_disengage(void)
if (time >= printmsg_time) { if (time >= printmsg_time) {
dev_info(xpc_part, "waiting for remote partitions to " dev_info(xpc_part, "waiting for remote partitions to "
"disengage, timeout in %ld seconds\n", "disengage, timeout in %ld seconds\n",
(disengage_request_timeout - time) / (disengage_request_timeout - time) /
sn_rtc_cycles_per_second); sn_rtc_cycles_per_second);
printmsg_time = time + printmsg_time = time +
(XPC_DISENGAGE_PRINTMSG_INTERVAL * (XPC_DISENGAGE_PRINTMSG_INTERVAL *
sn_rtc_cycles_per_second); sn_rtc_cycles_per_second);
} }
} }
} }
/* /*
* This function is called when the system is being restarted or halted due * This function is called when the system is being restarted or halted due
* to some sort of system failure. If this is the case we need to notify the * to some sort of system failure. If this is the case we need to notify the
...@@ -1191,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) ...@@ -1191,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
case DIE_KDEBUG_ENTER: case DIE_KDEBUG_ENTER:
/* Should lack of heartbeat be ignored by other partitions? */ /* Should lack of heartbeat be ignored by other partitions? */
if (!xpc_kdebug_ignore) { if (!xpc_kdebug_ignore)
break; break;
}
/* fall through */ /* fall through */
case DIE_MCA_MONARCH_ENTER: case DIE_MCA_MONARCH_ENTER:
case DIE_INIT_MONARCH_ENTER: case DIE_INIT_MONARCH_ENTER:
...@@ -1203,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) ...@@ -1203,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
case DIE_KDEBUG_LEAVE: case DIE_KDEBUG_LEAVE:
/* Is lack of heartbeat being ignored by other partitions? */ /* Is lack of heartbeat being ignored by other partitions? */
if (!xpc_kdebug_ignore) { if (!xpc_kdebug_ignore)
break; break;
}
/* fall through */ /* fall through */
case DIE_MCA_MONARCH_LEAVE: case DIE_MCA_MONARCH_LEAVE:
case DIE_INIT_MONARCH_LEAVE: case DIE_INIT_MONARCH_LEAVE:
...@@ -1217,26 +1120,23 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) ...@@ -1217,26 +1120,23 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
return NOTIFY_DONE; return NOTIFY_DONE;
} }
int __init int __init
xpc_init(void) xpc_init(void)
{ {
int ret; int ret;
partid_t partid; partid_t partid;
struct xpc_partition *part; struct xpc_partition *part;
pid_t pid; struct task_struct *kthread;
size_t buf_size; size_t buf_size;
if (!ia64_platform_is("sn2"))
if (!ia64_platform_is("sn2")) {
return -ENODEV; return -ENODEV;
}
buf_size = max(XPC_RP_VARS_SIZE, buf_size = max(XPC_RP_VARS_SIZE,
XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
GFP_KERNEL, &xpc_remote_copy_buffer_base); GFP_KERNEL,
&xpc_remote_copy_buffer_base);
if (xpc_remote_copy_buffer == NULL) if (xpc_remote_copy_buffer == NULL)
return -ENOMEM; return -ENOMEM;
...@@ -1256,7 +1156,7 @@ xpc_init(void) ...@@ -1256,7 +1156,7 @@ xpc_init(void)
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
part->act_IRQ_rcvd = 0; part->act_IRQ_rcvd = 0;
spin_lock_init(&part->act_lock); spin_lock_init(&part->act_lock);
...@@ -1265,8 +1165,8 @@ xpc_init(void) ...@@ -1265,8 +1165,8 @@ xpc_init(void)
init_timer(&part->disengage_request_timer); init_timer(&part->disengage_request_timer);
part->disengage_request_timer.function = part->disengage_request_timer.function =
xpc_timeout_partition_disengage_request; xpc_timeout_partition_disengage_request;
part->disengage_request_timer.data = (unsigned long) part; part->disengage_request_timer.data = (unsigned long)part;
part->setup_state = XPC_P_UNSET; part->setup_state = XPC_P_UNSET;
init_waitqueue_head(&part->teardown_wq); init_waitqueue_head(&part->teardown_wq);
...@@ -1292,16 +1192,15 @@ xpc_init(void) ...@@ -1292,16 +1192,15 @@ xpc_init(void)
* but rather immediately process the interrupt. * but rather immediately process the interrupt.
*/ */
ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
"xpc hb", NULL); "xpc hb", NULL);
if (ret != 0) { if (ret != 0) {
dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
"errno=%d\n", -ret); "errno=%d\n", -ret);
xpc_restrict_IPI_ops(); xpc_restrict_IPI_ops();
if (xpc_sysctl) { if (xpc_sysctl)
unregister_sysctl_table(xpc_sysctl); unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base); kfree(xpc_remote_copy_buffer_base);
return -EBUSY; return -EBUSY;
...@@ -1319,26 +1218,22 @@ xpc_init(void) ...@@ -1319,26 +1218,22 @@ xpc_init(void)
free_irq(SGI_XPC_ACTIVATE, NULL); free_irq(SGI_XPC_ACTIVATE, NULL);
xpc_restrict_IPI_ops(); xpc_restrict_IPI_ops();
if (xpc_sysctl) { if (xpc_sysctl)
unregister_sysctl_table(xpc_sysctl); unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base); kfree(xpc_remote_copy_buffer_base);
return -EBUSY; return -EBUSY;
} }
/* add ourselves to the reboot_notifier_list */ /* add ourselves to the reboot_notifier_list */
ret = register_reboot_notifier(&xpc_reboot_notifier); ret = register_reboot_notifier(&xpc_reboot_notifier);
if (ret != 0) { if (ret != 0)
dev_warn(xpc_part, "can't register reboot notifier\n"); dev_warn(xpc_part, "can't register reboot notifier\n");
}
/* add ourselves to the die_notifier list */ /* add ourselves to the die_notifier list */
ret = register_die_notifier(&xpc_die_notifier); ret = register_die_notifier(&xpc_die_notifier);
if (ret != 0) { if (ret != 0)
dev_warn(xpc_part, "can't register die notifier\n"); dev_warn(xpc_part, "can't register die notifier\n");
}
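
Both registrations above treat failure as survivable and only warn. A minimal sketch of that pattern, assuming the standard notifier_block API; my_die_event is an invented handler, not part of the commit:

        #include <linux/kernel.h>
        #include <linux/notifier.h>
        #include <linux/kdebug.h>

        static int my_die_event(struct notifier_block *nb, unsigned long event,
        			void *unused)
        {
        	/* react to the event; NOTIFY_DONE lets other notifiers run */
        	return NOTIFY_DONE;
        }

        static struct notifier_block my_die_notifier = {
        	.notifier_call = my_die_event,
        };

        static void my_register(void)
        {
        	/* failure is survivable here, so only warn -- the same policy
        	 * the code above applies to its reboot and die notifiers */
        	if (register_die_notifier(&my_die_notifier) != 0)
        		printk(KERN_WARNING "can't register die notifier\n");
        }
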
init_timer(&xpc_hb_timer); init_timer(&xpc_hb_timer);
xpc_hb_timer.function = xpc_hb_beater; xpc_hb_timer.function = xpc_hb_beater;
...@@ -1347,39 +1242,38 @@ xpc_init(void) ...@@ -1347,39 +1242,38 @@ xpc_init(void)
* The real work-horse behind xpc. This processes incoming * The real work-horse behind xpc. This processes incoming
* interrupts and monitors remote heartbeats. * interrupts and monitors remote heartbeats.
*/ */
pid = kernel_thread(xpc_hb_checker, NULL, 0); kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
if (pid < 0) { if (IS_ERR(kthread)) {
dev_err(xpc_part, "failed while forking hb check thread\n"); dev_err(xpc_part, "failed while forking hb check thread\n");
/* indicate to others that our reserved page is uninitialized */ /* indicate to others that our reserved page is uninitialized */
xpc_rsvd_page->vars_pa = 0; xpc_rsvd_page->vars_pa = 0;
/* take ourselves off of the reboot_notifier_list */ /* take ourselves off of the reboot_notifier_list */
(void) unregister_reboot_notifier(&xpc_reboot_notifier); (void)unregister_reboot_notifier(&xpc_reboot_notifier);
/* take ourselves off of the die_notifier list */ /* take ourselves off of the die_notifier list */
(void) unregister_die_notifier(&xpc_die_notifier); (void)unregister_die_notifier(&xpc_die_notifier);
del_timer_sync(&xpc_hb_timer); del_timer_sync(&xpc_hb_timer);
free_irq(SGI_XPC_ACTIVATE, NULL); free_irq(SGI_XPC_ACTIVATE, NULL);
xpc_restrict_IPI_ops(); xpc_restrict_IPI_ops();
if (xpc_sysctl) { if (xpc_sysctl)
unregister_sysctl_table(xpc_sysctl); unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base); kfree(xpc_remote_copy_buffer_base);
return -EBUSY; return -EBUSY;
} }
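
The commit replaces kernel_thread()'s pid-based error check with kthread_run(), whose return value is a task_struct pointer that must be tested with IS_ERR() rather than compared against zero. A minimal sketch of that conversion pattern; the thread body and name are illustrative:

        #include <linux/kthread.h>
        #include <linux/err.h>

        static int my_worker(void *arg)
        {
        	/* ... do work until completion or kthread_stop() ... */
        	return 0;
        }

        static int my_start_worker(void)
        {
        	struct task_struct *kthread;

        	kthread = kthread_run(my_worker, NULL, "my_worker");
        	if (IS_ERR(kthread))
        		return PTR_ERR(kthread);	/* no task was created */
        	return 0;
        }
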
/* /*
* Startup a thread that will attempt to discover other partitions to * Startup a thread that will attempt to discover other partitions to
* activate based on info provided by SAL. This new thread is short * activate based on info provided by SAL. This new thread is short
* lived and will exit once discovery is complete. * lived and will exit once discovery is complete.
*/ */
pid = kernel_thread(xpc_initiate_discovery, NULL, 0); kthread = kthread_run(xpc_initiate_discovery, NULL,
if (pid < 0) { XPC_DISCOVERY_THREAD_NAME);
if (IS_ERR(kthread)) {
dev_err(xpc_part, "failed while forking discovery thread\n"); dev_err(xpc_part, "failed while forking discovery thread\n");
/* mark this new thread as a non-starter */ /* mark this new thread as a non-starter */
...@@ -1389,7 +1283,6 @@ xpc_init(void) ...@@ -1389,7 +1283,6 @@ xpc_init(void)
return -EBUSY; return -EBUSY;
} }
/* set the interface to point at XPC's functions */ /* set the interface to point at XPC's functions */
xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
xpc_initiate_allocate, xpc_initiate_send, xpc_initiate_allocate, xpc_initiate_send,
...@@ -1398,16 +1291,16 @@ xpc_init(void) ...@@ -1398,16 +1291,16 @@ xpc_init(void)
return 0; return 0;
} }
module_init(xpc_init);
module_init(xpc_init);
void __exit void __exit
xpc_exit(void) xpc_exit(void)
{ {
xpc_do_exit(xpcUnloading); xpc_do_exit(xpcUnloading);
} }
module_exit(xpc_exit);
module_exit(xpc_exit);
MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
...@@ -1415,17 +1308,16 @@ MODULE_LICENSE("GPL"); ...@@ -1415,17 +1308,16 @@ MODULE_LICENSE("GPL");
module_param(xpc_hb_interval, int, 0); module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
"heartbeat increments."); "heartbeat increments.");
module_param(xpc_hb_check_interval, int, 0); module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
"heartbeat checks."); "heartbeat checks.");
module_param(xpc_disengage_request_timelimit, int, 0); module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
"for disengage request to complete."); "for disengage request to complete.");
module_param(xpc_kdebug_ignore, int, 0); module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
"other partitions when dropping into kdebug."); "other partitions when dropping into kdebug.");
...@@ -3,10 +3,9 @@ ...@@ -3,10 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/ */
/* /*
* Cross Partition Communication (XPC) partition support. * Cross Partition Communication (XPC) partition support.
* *
...@@ -16,7 +15,6 @@ ...@@ -16,7 +15,6 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/cache.h> #include <linux/cache.h>
...@@ -28,13 +26,11 @@ ...@@ -28,13 +26,11 @@
#include <asm/sn/sn_sal.h> #include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h> #include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h> #include <asm/sn/addrs.h>
#include <asm/sn/xpc.h> #include "xpc.h"
/* XPC is exiting flag */ /* XPC is exiting flag */
int xpc_exiting; int xpc_exiting;
/* SH_IPI_ACCESS shub register value on startup */ /* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access; static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0; static u64 xpc_sh2_IPI_access0;
...@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1; ...@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2; static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3; static u64 xpc_sh2_IPI_access3;
/* original protection values for each node */ /* original protection values for each node */
u64 xpc_prot_vec[MAX_NUMNODES]; u64 xpc_prot_vec[MAX_NUMNODES];
/* this partition's reserved page pointers */ /* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page; struct xpc_rsvd_page *xpc_rsvd_page;
static u64 *xpc_part_nasids; static u64 *xpc_part_nasids;
...@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part; ...@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part;
static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
static int xp_nasid_mask_words; /* actual size in words of nasid mask */ static int xp_nasid_mask_words; /* actual size in words of nasid mask */
/* /*
* For performance reasons, each entry of xpc_partitions[] is cacheline * For performance reasons, each entry of xpc_partitions[] is cacheline
* aligned. And xpc_partitions[] is padded with an additional entry at the * aligned. And xpc_partitions[] is padded with an additional entry at the
...@@ -66,7 +59,6 @@ static int xp_nasid_mask_words; /* actual size in words of nasid mask */ ...@@ -66,7 +59,6 @@ static int xp_nasid_mask_words; /* actual size in words of nasid mask */
*/ */
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
/* /*
* Generic buffer used to store a local copy of portions of a remote * Generic buffer used to store a local copy of portions of a remote
* partition's reserved page (either its header and part_nasids mask, * partition's reserved page (either its header and part_nasids mask,
...@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; ...@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
char *xpc_remote_copy_buffer; char *xpc_remote_copy_buffer;
void *xpc_remote_copy_buffer_base; void *xpc_remote_copy_buffer_base;
/* /*
* Guarantee that the kmalloc'd memory is cacheline aligned. * Guarantee that the kmalloc'd memory is cacheline aligned.
*/ */
...@@ -84,22 +75,21 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) ...@@ -84,22 +75,21 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{ {
{ /* see if kmalloc will give us cacheline aligned memory by default */ /* see if kmalloc will give us cacheline aligned memory by default */
*base = kmalloc(size, flags); *base = kmalloc(size, flags);
if (*base == NULL) { if (*base == NULL)
return NULL; return NULL;
}
if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base; return *base;
}
kfree(*base); kfree(*base);
/* nope, we'll have to do it ourselves */ /* nope, we'll have to do it ourselves */
*base = kmalloc(size + L1_CACHE_BYTES, flags); *base = kmalloc(size + L1_CACHE_BYTES, flags);
if (*base == NULL) { if (*base == NULL)
return NULL; return NULL;
}
return (void *) L1_CACHE_ALIGN((u64) *base);
}
return (void *)L1_CACHE_ALIGN((u64)*base);
}
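
xpc_kmalloc_cacheline_aligned() first tries a plain allocation and only over-allocates by one cache line when the result happens to be misaligned. A runnable user-space sketch of the same technique; the 128-byte line size is an assumption for illustration:

        #include <stdint.h>
        #include <stdlib.h>

        #define L1_CACHE_BYTES 128UL	/* assumed line size, for illustration */
        #define CACHE_ALIGN(a) (((a) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

        /* Returns a cacheline-aligned pointer; *base is what to free() later. */
        static void *malloc_cacheline_aligned(size_t size, void **base)
        {
        	*base = malloc(size);
        	if (*base == NULL)
        		return NULL;
        	if ((uintptr_t)*base == CACHE_ALIGN((uintptr_t)*base))
        		return *base;	/* already aligned by luck */
        	free(*base);

        	/* over-allocate so an aligned block of 'size' bytes must fit */
        	*base = malloc(size + L1_CACHE_BYTES);
        	if (*base == NULL)
        		return NULL;
        	return (void *)CACHE_ALIGN((uintptr_t)*base);
        }
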
/* /*
* Given a nasid, get the physical address of the partition's reserved page * Given a nasid, get the physical address of the partition's reserved page
...@@ -117,25 +107,24 @@ xpc_get_rsvd_page_pa(int nasid) ...@@ -117,25 +107,24 @@ xpc_get_rsvd_page_pa(int nasid)
u64 buf_len = 0; u64 buf_len = 0;
void *buf_base = NULL; void *buf_base = NULL;
while (1) { while (1) {
status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
&len); &len);
dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
"0x%016lx, address=0x%016lx, len=0x%016lx\n", "0x%016lx, address=0x%016lx, len=0x%016lx\n",
status, cookie, rp_pa, len); status, cookie, rp_pa, len);
if (status != SALRET_MORE_PASSES) { if (status != SALRET_MORE_PASSES)
break; break;
}
if (L1_CACHE_ALIGN(len) > buf_len) { if (L1_CACHE_ALIGN(len) > buf_len) {
kfree(buf_base); kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len); buf_len = L1_CACHE_ALIGN(len);
buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len, buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
GFP_KERNEL, &buf_base); GFP_KERNEL,
&buf_base);
if (buf_base == NULL) { if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc " dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", buf_len); "len=0x%016lx\n", buf_len);
...@@ -145,7 +134,7 @@ xpc_get_rsvd_page_pa(int nasid) ...@@ -145,7 +134,7 @@ xpc_get_rsvd_page_pa(int nasid)
} }
bte_res = xp_bte_copy(rp_pa, buf, buf_len, bte_res = xp_bte_copy(rp_pa, buf, buf_len,
(BTE_NOTIFY | BTE_WACQUIRE), NULL); (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bte_res != BTE_SUCCESS) { if (bte_res != BTE_SUCCESS) {
dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
status = SALRET_ERROR; status = SALRET_ERROR;
...@@ -155,14 +144,13 @@ xpc_get_rsvd_page_pa(int nasid) ...@@ -155,14 +144,13 @@ xpc_get_rsvd_page_pa(int nasid)
kfree(buf_base); kfree(buf_base);
if (status != SALRET_OK) { if (status != SALRET_OK)
rp_pa = 0; rp_pa = 0;
}
dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
return rp_pa; return rp_pa;
} }
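
xpc_get_rsvd_page_pa() keeps calling SAL while it reports SALRET_MORE_PASSES, growing its bounce buffer whenever the reported length outgrows it. A hedged sketch of that grow-and-retry shape; query_once() and the status values stand in for the SAL call and are not from the commit:

        #include <stdlib.h>

        enum status { OK, MORE_PASSES, ERROR };

        /* stand-in for sn_partition_reserved_page_pa(); fills *len as needed */
        extern enum status query_once(void *buf, size_t buf_len, size_t *len);

        static enum status query_with_growing_buffer(void)
        {
        	enum status st;
        	size_t len = 0, buf_len = 0;
        	void *buf = NULL;

        	for (;;) {
        		st = query_once(buf, buf_len, &len);
        		if (st != MORE_PASSES)
        			break;
        		if (len > buf_len) {	/* grow the bounce buffer */
        			free(buf);
        			buf_len = len;
        			buf = malloc(buf_len);
        			if (buf == NULL)
        				return ERROR;
        		}
        	}
        	free(buf);
        	return st;
        }
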
/* /*
* Fill the partition reserved page with the information needed by * Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial * other partitions to discover we are alive and establish initial
...@@ -176,7 +164,6 @@ xpc_rsvd_page_init(void) ...@@ -176,7 +164,6 @@ xpc_rsvd_page_init(void)
u64 rp_pa, nasid_array = 0; u64 rp_pa, nasid_array = 0;
int i, ret; int i, ret;
/* get the local reserved page's address */ /* get the local reserved page's address */
preempt_disable(); preempt_disable();
...@@ -186,7 +173,7 @@ xpc_rsvd_page_init(void) ...@@ -186,7 +173,7 @@ xpc_rsvd_page_init(void)
dev_err(xpc_part, "SAL failed to locate the reserved page\n"); dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return NULL; return NULL;
} }
rp = (struct xpc_rsvd_page *) __va(rp_pa); rp = (struct xpc_rsvd_page *)__va(rp_pa);
if (rp->partid != sn_partition_id) { if (rp->partid != sn_partition_id) {
dev_err(xpc_part, "the reserved page's partid of %d should be " dev_err(xpc_part, "the reserved page's partid of %d should be "
...@@ -222,8 +209,9 @@ xpc_rsvd_page_init(void) ...@@ -222,8 +209,9 @@ xpc_rsvd_page_init(void)
* on subsequent loads of XPC. This AMO page is never freed, and its * on subsequent loads of XPC. This AMO page is never freed, and its
* memory protections are never restricted. * memory protections are never restricted.
*/ */
if ((amos_page = xpc_vars->amos_page) == NULL) { amos_page = xpc_vars->amos_page;
amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0)); if (amos_page == NULL) {
amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
if (amos_page == NULL) { if (amos_page == NULL) {
dev_err(xpc_part, "can't allocate page of AMOs\n"); dev_err(xpc_part, "can't allocate page of AMOs\n");
return NULL; return NULL;
...@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void) ...@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void)
* when xpc_allow_IPI_ops() is called via xpc_hb_init(). * when xpc_allow_IPI_ops() is called via xpc_hb_init().
*/ */
if (!enable_shub_wars_1_1()) { if (!enable_shub_wars_1_1()) {
ret = sn_change_memprotect(ia64_tpa((u64) amos_page), ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, PAGE_SIZE,
&nasid_array); SN_MEMPROT_ACCESS_CLASS_1,
&nasid_array);
if (ret != 0) { if (ret != 0) {
dev_err(xpc_part, "can't change memory " dev_err(xpc_part, "can't change memory "
"protections\n"); "protections\n");
uncached_free_page(__IA64_UNCACHED_OFFSET | uncached_free_page(__IA64_UNCACHED_OFFSET |
TO_PHYS((u64) amos_page)); TO_PHYS((u64)amos_page));
return NULL; return NULL;
} }
} }
} else if (!IS_AMO_ADDRESS((u64) amos_page)) { } else if (!IS_AMO_ADDRESS((u64)amos_page)) {
/* /*
* EFI's XPBOOT can also set amos_page in the reserved page, * EFI's XPBOOT can also set amos_page in the reserved page,
* but it happens to leave it as an uncached physical address * but it happens to leave it as an uncached physical address
* and we need it to be an uncached virtual, so we'll have to * and we need it to be an uncached virtual, so we'll have to
* convert it. * convert it.
*/ */
if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
dev_err(xpc_part, "previously used amos_page address " dev_err(xpc_part, "previously used amos_page address "
"is bad = 0x%p\n", (void *) amos_page); "is bad = 0x%p\n", (void *)amos_page);
return NULL; return NULL;
} }
amos_page = (AMO_t *) TO_AMO((u64) amos_page); amos_page = (AMO_t *)TO_AMO((u64)amos_page);
} }
/* clear xpc_vars */ /* clear xpc_vars */
...@@ -267,22 +256,20 @@ xpc_rsvd_page_init(void) ...@@ -267,22 +256,20 @@ xpc_rsvd_page_init(void)
xpc_vars->act_nasid = cpuid_to_nasid(0); xpc_vars->act_nasid = cpuid_to_nasid(0);
xpc_vars->act_phys_cpuid = cpu_physical_id(0); xpc_vars->act_phys_cpuid = cpu_physical_id(0);
xpc_vars->vars_part_pa = __pa(xpc_vars_part); xpc_vars->vars_part_pa = __pa(xpc_vars_part);
xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page); xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
xpc_vars->amos_page = amos_page; /* save for next load of XPC */ xpc_vars->amos_page = amos_page; /* save for next load of XPC */
/* clear xpc_vars_part */ /* clear xpc_vars_part */
memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) * memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
XP_MAX_PARTITIONS); XP_MAX_PARTITIONS);
/* initialize the activate IRQ related AMO variables */ /* initialize the activate IRQ related AMO variables */
for (i = 0; i < xp_nasid_mask_words; i++) { for (i = 0; i < xp_nasid_mask_words; i++)
(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
}
/* initialize the engaged remote partitions related AMO variables */ /* initialize the engaged remote partitions related AMO variables */
(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
/* timestamp of when reserved page was setup by XPC */ /* timestamp of when reserved page was setup by XPC */
rp->stamp = CURRENT_TIME; rp->stamp = CURRENT_TIME;
...@@ -296,7 +283,6 @@ xpc_rsvd_page_init(void) ...@@ -296,7 +283,6 @@ xpc_rsvd_page_init(void)
return rp; return rp;
} }
/* /*
* Change protections to allow IPI operations (and AMO operations on * Change protections to allow IPI operations (and AMO operations on
* Shub 1.1 systems). * Shub 1.1 systems).
...@@ -307,39 +293,38 @@ xpc_allow_IPI_ops(void) ...@@ -307,39 +293,38 @@ xpc_allow_IPI_ops(void)
int node; int node;
int nasid; int nasid;
/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
if (is_shub2()) { if (is_shub2()) {
xpc_sh2_IPI_access0 = xpc_sh2_IPI_access0 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
xpc_sh2_IPI_access1 = xpc_sh2_IPI_access1 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
xpc_sh2_IPI_access2 = xpc_sh2_IPI_access2 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
xpc_sh2_IPI_access3 = xpc_sh2_IPI_access3 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
for_each_online_node(node) { for_each_online_node(node) {
nasid = cnodeid_to_nasid(node); nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-1UL); -1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-1UL); -1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-1UL); -1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-1UL); -1UL);
} }
} else { } else {
xpc_sh1_IPI_access = xpc_sh1_IPI_access =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
for_each_online_node(node) { for_each_online_node(node) {
nasid = cnodeid_to_nasid(node); nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-1UL); -1UL);
/* /*
* Since the BIST collides with memory operations on * Since the BIST collides with memory operations on
...@@ -347,21 +332,23 @@ xpc_allow_IPI_ops(void) ...@@ -347,21 +332,23 @@ xpc_allow_IPI_ops(void)
*/ */
if (enable_shub_wars_1_1()) { if (enable_shub_wars_1_1()) {
/* open up everything */ /* open up everything */
xpc_prot_vec[node] = (u64) HUB_L((u64 *) xpc_prot_vec[node] = (u64)HUB_L((u64 *)
GLOBAL_MMR_ADDR(nasid, GLOBAL_MMR_ADDR
SH1_MD_DQLP_MMR_DIR_PRIVEC0)); (nasid,
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_MD_DQLP_MMR_DIR_PRIVEC0));
SH1_MD_DQLP_MMR_DIR_PRIVEC0), HUB_S((u64 *)
-1UL); GLOBAL_MMR_ADDR(nasid,
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_MD_DQLP_MMR_DIR_PRIVEC0),
SH1_MD_DQRP_MMR_DIR_PRIVEC0), -1UL);
-1UL); HUB_S((u64 *)
GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-1UL);
} }
} }
} }
} }
/* /*
* Restrict protections to disallow IPI operations (and AMO operations on * Restrict protections to disallow IPI operations (and AMO operations on
* Shub 1.1 systems). * Shub 1.1 systems).
...@@ -372,43 +359,41 @@ xpc_restrict_IPI_ops(void) ...@@ -372,43 +359,41 @@ xpc_restrict_IPI_ops(void)
int node; int node;
int nasid; int nasid;
/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
if (is_shub2()) { if (is_shub2()) {
for_each_online_node(node) { for_each_online_node(node) {
nasid = cnodeid_to_nasid(node); nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
xpc_sh2_IPI_access0); xpc_sh2_IPI_access0);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
xpc_sh2_IPI_access1); xpc_sh2_IPI_access1);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
xpc_sh2_IPI_access2); xpc_sh2_IPI_access2);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
xpc_sh2_IPI_access3); xpc_sh2_IPI_access3);
} }
} else { } else {
for_each_online_node(node) { for_each_online_node(node) {
nasid = cnodeid_to_nasid(node); nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
xpc_sh1_IPI_access); xpc_sh1_IPI_access);
if (enable_shub_wars_1_1()) { if (enable_shub_wars_1_1()) {
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQLP_MMR_DIR_PRIVEC0), SH1_MD_DQLP_MMR_DIR_PRIVEC0),
xpc_prot_vec[node]); xpc_prot_vec[node]);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQRP_MMR_DIR_PRIVEC0), SH1_MD_DQRP_MMR_DIR_PRIVEC0),
xpc_prot_vec[node]); xpc_prot_vec[node]);
} }
} }
} }
} }
/* /*
* At periodic intervals, scan through all active partitions and ensure * At periodic intervals, scan through all active partitions and ensure
* their heartbeat is still active. If not, the partition is deactivated. * their heartbeat is still active. If not, the partition is deactivated.
...@@ -421,34 +406,31 @@ xpc_check_remote_hb(void) ...@@ -421,34 +406,31 @@ xpc_check_remote_hb(void)
partid_t partid; partid_t partid;
bte_result_t bres; bte_result_t bres;
remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
if (xpc_exiting) { if (xpc_exiting)
break; break;
}
if (partid == sn_partition_id) { if (partid == sn_partition_id)
continue; continue;
}
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
if (part->act_state == XPC_P_INACTIVE || if (part->act_state == XPC_P_INACTIVE ||
part->act_state == XPC_P_DEACTIVATING) { part->act_state == XPC_P_DEACTIVATING) {
continue; continue;
} }
/* pull the remote_hb cache line */ /* pull the remote_hb cache line */
bres = xp_bte_copy(part->remote_vars_pa, bres = xp_bte_copy(part->remote_vars_pa,
(u64) remote_vars, (u64)remote_vars,
XPC_RP_VARS_SIZE, XPC_RP_VARS_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL); (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) { if (bres != BTE_SUCCESS) {
XPC_DEACTIVATE_PARTITION(part, XPC_DEACTIVATE_PARTITION(part,
xpc_map_bte_errors(bres)); xpc_map_bte_errors(bres));
continue; continue;
} }
...@@ -459,8 +441,8 @@ xpc_check_remote_hb(void) ...@@ -459,8 +441,8 @@ xpc_check_remote_hb(void)
remote_vars->heartbeating_to_mask); remote_vars->heartbeating_to_mask);
if (((remote_vars->heartbeat == part->last_heartbeat) && if (((remote_vars->heartbeat == part->last_heartbeat) &&
(remote_vars->heartbeat_offline == 0)) || (remote_vars->heartbeat_offline == 0)) ||
!xpc_hb_allowed(sn_partition_id, remote_vars)) { !xpc_hb_allowed(sn_partition_id, remote_vars)) {
XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
continue; continue;
...@@ -470,7 +452,6 @@ xpc_check_remote_hb(void) ...@@ -470,7 +452,6 @@ xpc_check_remote_hb(void)
} }
} }
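
xpc_check_remote_hb() deems a partition dead when its heartbeat counter has not advanced since the previous scan and the remote side has not declared itself deliberately offline. A user-space sketch of that staleness test; the struct fields are illustrative:

        #include <stdint.h>

        struct peer {
        	uint64_t heartbeat;		/* copied from the remote side */
        	uint64_t heartbeat_offline;	/* nonzero: deliberately quiet */
        	uint64_t last_heartbeat;	/* what we saw on the prior scan */
        };

        /* Returns 1 if the peer should be deactivated. */
        static int heartbeat_stale(struct peer *p)
        {
        	if (p->heartbeat == p->last_heartbeat &&
        	    p->heartbeat_offline == 0)
        		return 1;	/* counter never moved: assume dead */
        	p->last_heartbeat = p->heartbeat;
        	return 0;
        }
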
/* /*
* Get a copy of a portion of the remote partition's rsvd page. * Get a copy of a portion of the remote partition's rsvd page.
* *
...@@ -480,59 +461,48 @@ xpc_check_remote_hb(void) ...@@ -480,59 +461,48 @@ xpc_check_remote_hb(void)
*/ */
static enum xpc_retval static enum xpc_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids, xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{ {
int bres, i; int bres, i;
/* get the reserved page's physical address */ /* get the reserved page's physical address */
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid); *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
if (*remote_rp_pa == 0) { if (*remote_rp_pa == 0)
return xpcNoRsvdPageAddr; return xpcNoRsvdPageAddr;
}
/* pull over the reserved page header and part_nasids mask */ /* pull over the reserved page header and part_nasids mask */
bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
(BTE_NOTIFY | BTE_WACQUIRE), NULL); (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) { if (bres != BTE_SUCCESS)
return xpc_map_bte_errors(bres); return xpc_map_bte_errors(bres);
}
if (discovered_nasids != NULL) { if (discovered_nasids != NULL) {
u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
for (i = 0; i < xp_nasid_mask_words; i++)
for (i = 0; i < xp_nasid_mask_words; i++) {
discovered_nasids[i] |= remote_part_nasids[i]; discovered_nasids[i] |= remote_part_nasids[i];
}
} }
/* check that the partid is for another partition */ /* check that the partid is for another partition */
if (remote_rp->partid < 1 || if (remote_rp->partid < 1 ||
remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
return xpcInvalidPartid; return xpcInvalidPartid;
} }
if (remote_rp->partid == sn_partition_id) { if (remote_rp->partid == sn_partition_id)
return xpcLocalPartid; return xpcLocalPartid;
}
if (XPC_VERSION_MAJOR(remote_rp->version) != if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) { XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
return xpcBadVersion; return xpcBadVersion;
} }
return xpcSuccess; return xpcSuccess;
} }
/* /*
* Get a copy of the remote partition's XPC variables from the reserved page. * Get a copy of the remote partition's XPC variables from the reserved page.
* *
...@@ -544,34 +514,30 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) ...@@ -544,34 +514,30 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{ {
int bres; int bres;
if (remote_vars_pa == 0)
if (remote_vars_pa == 0) {
return xpcVarsNotSet; return xpcVarsNotSet;
}
/* pull over the cross partition variables */ /* pull over the cross partition variables */
bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL); (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) { if (bres != BTE_SUCCESS)
return xpc_map_bte_errors(bres); return xpc_map_bte_errors(bres);
}
if (XPC_VERSION_MAJOR(remote_vars->version) != if (XPC_VERSION_MAJOR(remote_vars->version) !=
XPC_VERSION_MAJOR(XPC_V_VERSION)) { XPC_VERSION_MAJOR(XPC_V_VERSION)) {
return xpcBadVersion; return xpcBadVersion;
} }
return xpcSuccess; return xpcSuccess;
} }
/* /*
* Update the remote partition's info. * Update the remote partition's info.
*/ */
static void static void
xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
struct timespec *remote_rp_stamp, u64 remote_rp_pa, struct timespec *remote_rp_stamp, u64 remote_rp_pa,
u64 remote_vars_pa, struct xpc_vars *remote_vars) u64 remote_vars_pa, struct xpc_vars *remote_vars)
{ {
part->remote_rp_version = remote_rp_version; part->remote_rp_version = remote_rp_version;
dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
...@@ -613,7 +579,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, ...@@ -613,7 +579,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
part->remote_vars_version); part->remote_vars_version);
} }
/* /*
* Prior code has determined the nasid which generated an IPI. Inspect * Prior code has determined the nasid which generated an IPI. Inspect
* that nasid to determine if its partition needs to be activated or * that nasid to determine if its partition needs to be activated or
...@@ -643,54 +608,51 @@ xpc_identify_act_IRQ_req(int nasid) ...@@ -643,54 +608,51 @@ xpc_identify_act_IRQ_req(int nasid)
struct xpc_partition *part; struct xpc_partition *part;
enum xpc_retval ret; enum xpc_retval ret;
/* pull over the reserved page structure */ /* pull over the reserved page structure */
remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
if (ret != xpcSuccess) { if (ret != xpcSuccess) {
dev_warn(xpc_part, "unable to get reserved page from nasid %d, " dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret); "which sent interrupt, reason=%d\n", nasid, ret);
return; return;
} }
remote_vars_pa = remote_rp->vars_pa; remote_vars_pa = remote_rp->vars_pa;
remote_rp_version = remote_rp->version; remote_rp_version = remote_rp->version;
if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
remote_rp_stamp = remote_rp->stamp; remote_rp_stamp = remote_rp->stamp;
}
partid = remote_rp->partid; partid = remote_rp->partid;
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
/* pull over the cross partition variables */ /* pull over the cross partition variables */
remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
if (ret != xpcSuccess) { if (ret != xpcSuccess) {
dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret); "which sent interrupt, reason=%d\n", nasid, ret);
XPC_DEACTIVATE_PARTITION(part, ret); XPC_DEACTIVATE_PARTITION(part, ret);
return; return;
} }
part->act_IRQ_rcvd++; part->act_IRQ_rcvd++;
dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
remote_vars->heartbeat, remote_vars->heartbeating_to_mask); remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
if (xpc_partition_disengaged(part) && if (xpc_partition_disengaged(part) &&
part->act_state == XPC_P_INACTIVE) { part->act_state == XPC_P_INACTIVE) {
xpc_update_partition_info(part, remote_rp_version, xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa, &remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars); remote_vars_pa, remote_vars);
if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
if (xpc_partition_disengage_requested(1UL << partid)) { if (xpc_partition_disengage_requested(1UL << partid)) {
...@@ -714,16 +676,15 @@ xpc_identify_act_IRQ_req(int nasid) ...@@ -714,16 +676,15 @@ xpc_identify_act_IRQ_req(int nasid)
if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
remote_vars_version)); remote_vars_version));
if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
version)); version));
/* see if the other side rebooted */ /* see if the other side rebooted */
if (part->remote_amos_page_pa == if (part->remote_amos_page_pa ==
remote_vars->amos_page_pa && remote_vars->amos_page_pa &&
xpc_hb_allowed(sn_partition_id, xpc_hb_allowed(sn_partition_id, remote_vars)) {
remote_vars)) {
/* doesn't look that way, so ignore the IPI */ /* doesn't look that way, so ignore the IPI */
return; return;
} }
...@@ -735,8 +696,8 @@ xpc_identify_act_IRQ_req(int nasid) ...@@ -735,8 +696,8 @@ xpc_identify_act_IRQ_req(int nasid)
*/ */
xpc_update_partition_info(part, remote_rp_version, xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa, &remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars); remote_vars_pa, remote_vars);
part->reactivate_nasid = nasid; part->reactivate_nasid = nasid;
XPC_DEACTIVATE_PARTITION(part, xpcReactivating); XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
return; return;
...@@ -756,15 +717,15 @@ xpc_identify_act_IRQ_req(int nasid) ...@@ -756,15 +717,15 @@ xpc_identify_act_IRQ_req(int nasid)
xpc_clear_partition_disengage_request(1UL << partid); xpc_clear_partition_disengage_request(1UL << partid);
xpc_update_partition_info(part, remote_rp_version, xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa, &remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars); remote_vars_pa, remote_vars);
reactivate = 1; reactivate = 1;
} else { } else {
DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
&remote_rp_stamp); &remote_rp_stamp);
if (stamp_diff != 0) { if (stamp_diff != 0) {
DBUG_ON(stamp_diff >= 0); DBUG_ON(stamp_diff >= 0);
...@@ -775,17 +736,18 @@ xpc_identify_act_IRQ_req(int nasid) ...@@ -775,17 +736,18 @@ xpc_identify_act_IRQ_req(int nasid)
DBUG_ON(xpc_partition_engaged(1UL << partid)); DBUG_ON(xpc_partition_engaged(1UL << partid));
DBUG_ON(xpc_partition_disengage_requested(1UL << DBUG_ON(xpc_partition_disengage_requested(1UL <<
partid)); partid));
xpc_update_partition_info(part, remote_rp_version, xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa, &remote_rp_stamp,
remote_vars_pa, remote_vars); remote_rp_pa, remote_vars_pa,
remote_vars);
reactivate = 1; reactivate = 1;
} }
} }
if (part->disengage_request_timeout > 0 && if (part->disengage_request_timeout > 0 &&
!xpc_partition_disengaged(part)) { !xpc_partition_disengaged(part)) {
/* still waiting on other side to disengage from us */ /* still waiting on other side to disengage from us */
return; return;
} }
...@@ -795,12 +757,11 @@ xpc_identify_act_IRQ_req(int nasid) ...@@ -795,12 +757,11 @@ xpc_identify_act_IRQ_req(int nasid)
XPC_DEACTIVATE_PARTITION(part, xpcReactivating); XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
xpc_partition_disengage_requested(1UL << partid)) { xpc_partition_disengage_requested(1UL << partid)) {
XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
} }
} }
/* /*
* Loop through the activation AMO variables and process any bits * Loop through the activation AMO variables and process any bits
* which are set. Each bit indicates a nasid sending a partition * which are set. Each bit indicates a nasid sending a partition
...@@ -813,20 +774,17 @@ xpc_identify_act_IRQ_sender(void) ...@@ -813,20 +774,17 @@ xpc_identify_act_IRQ_sender(void)
{ {
int word, bit; int word, bit;
u64 nasid_mask; u64 nasid_mask;
u64 nasid; /* remote nasid */ u64 nasid; /* remote nasid */
int n_IRQs_detected = 0; int n_IRQs_detected = 0;
AMO_t *act_amos; AMO_t *act_amos;
act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
/* scan through act AMO variable looking for non-zero entries */ /* scan through act AMO variable looking for non-zero entries */
for (word = 0; word < xp_nasid_mask_words; word++) { for (word = 0; word < xp_nasid_mask_words; word++) {
if (xpc_exiting) { if (xpc_exiting)
break; break;
}
nasid_mask = xpc_IPI_receive(&act_amos[word]); nasid_mask = xpc_IPI_receive(&act_amos[word]);
if (nasid_mask == 0) { if (nasid_mask == 0) {
...@@ -837,7 +795,6 @@ xpc_identify_act_IRQ_sender(void) ...@@ -837,7 +795,6 @@ xpc_identify_act_IRQ_sender(void)
dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
nasid_mask); nasid_mask);
/* /*
* If this nasid has been added to the machine since * If this nasid has been added to the machine since
* our partition was reset, this will retain the * our partition was reset, this will retain the
...@@ -846,7 +803,6 @@ xpc_identify_act_IRQ_sender(void) ...@@ -846,7 +803,6 @@ xpc_identify_act_IRQ_sender(void)
*/ */
xpc_mach_nasids[word] |= nasid_mask; xpc_mach_nasids[word] |= nasid_mask;
/* locate the nasid(s) which sent interrupts */ /* locate the nasid(s) which sent interrupts */
for (bit = 0; bit < (8 * sizeof(u64)); bit++) { for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
...@@ -862,7 +818,6 @@ xpc_identify_act_IRQ_sender(void) ...@@ -862,7 +818,6 @@ xpc_identify_act_IRQ_sender(void)
return n_IRQs_detected; return n_IRQs_detected;
} }
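
xpc_identify_act_IRQ_sender() walks each 64-bit AMO word and tests every bit to locate the nasids that raised an interrupt, skipping words that come back zero. A runnable sketch of that word/bit scan; the mapping from (word, bit) to a sender id is illustrative:

        #include <stdint.h>
        #include <stdio.h>

        #define MASK_WORDS 4	/* illustrative mask size */

        static int scan_senders(const uint64_t mask[MASK_WORDS])
        {
        	int word, bit, n_found = 0;

        	for (word = 0; word < MASK_WORDS; word++) {
        		if (mask[word] == 0)
        			continue;	/* nothing pending in this word */
        		for (bit = 0; bit < 64; bit++) {
        			if (!(mask[word] & ((uint64_t)1 << bit)))
        				continue;
        			/* illustrative: bit position encodes the sender */
        			printf("sender id %d\n", word * 64 + bit);
        			n_found++;
        		}
        	}
        	return n_found;
        }
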
/* /*
* See if the other side has responded to a partition disengage request * See if the other side has responded to a partition disengage request
* from us. * from us.
...@@ -873,11 +828,11 @@ xpc_partition_disengaged(struct xpc_partition *part) ...@@ -873,11 +828,11 @@ xpc_partition_disengaged(struct xpc_partition *part)
partid_t partid = XPC_PARTID(part); partid_t partid = XPC_PARTID(part);
int disengaged; int disengaged;
disengaged = (xpc_partition_engaged(1UL << partid) == 0); disengaged = (xpc_partition_engaged(1UL << partid) == 0);
if (part->disengage_request_timeout) { if (part->disengage_request_timeout) {
if (!disengaged) { if (!disengaged) {
if (time_before(jiffies, part->disengage_request_timeout)) { if (time_before(jiffies,
part->disengage_request_timeout)) {
/* timelimit hasn't been reached yet */ /* timelimit hasn't been reached yet */
return 0; return 0;
} }
...@@ -888,7 +843,7 @@ xpc_partition_disengaged(struct xpc_partition *part) ...@@ -888,7 +843,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
*/ */
dev_info(xpc_part, "disengage from remote partition %d " dev_info(xpc_part, "disengage from remote partition %d "
"timed out\n", partid); "timed out\n", partid);
xpc_disengage_request_timedout = 1; xpc_disengage_request_timedout = 1;
xpc_clear_partition_engaged(1UL << partid); xpc_clear_partition_engaged(1UL << partid);
disengaged = 1; disengaged = 1;
...@@ -898,23 +853,20 @@ xpc_partition_disengaged(struct xpc_partition *part) ...@@ -898,23 +853,20 @@ xpc_partition_disengaged(struct xpc_partition *part)
/* cancel the timer function, provided it's not us */ /* cancel the timer function, provided it's not us */
if (!in_interrupt()) { if (!in_interrupt()) {
del_singleshot_timer_sync(&part-> del_singleshot_timer_sync(&part->
disengage_request_timer); disengage_request_timer);
} }
DBUG_ON(part->act_state != XPC_P_DEACTIVATING && DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
part->act_state != XPC_P_INACTIVE); part->act_state != XPC_P_INACTIVE);
if (part->act_state != XPC_P_INACTIVE) { if (part->act_state != XPC_P_INACTIVE)
xpc_wakeup_channel_mgr(part); xpc_wakeup_channel_mgr(part);
}
if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
xpc_cancel_partition_disengage_request(part); xpc_cancel_partition_disengage_request(part);
}
} }
return disengaged; return disengaged;
} }
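
xpc_partition_disengaged() treats the request as pending until either the engaged bit clears or a jiffies deadline passes, using time_before() to stay correct across jiffies wrap-around. A minimal sketch of that deadline idiom; the variable names are invented:

        #include <linux/jiffies.h>

        static unsigned long my_timeout;	/* jiffies value of the deadline */

        /* Returns 1 once the peer is gone or we give up waiting. */
        static int my_disengaged(int still_engaged)
        {
        	if (!still_engaged)
        		return 1;
        	if (time_before(jiffies, my_timeout))
        		return 0;	/* deadline not reached; keep waiting */
        	/* deadline passed: declare the peer disengaged by fiat */
        	return 1;
        }
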
/* /*
* Mark specified partition as active. * Mark specified partition as active.
*/ */
...@@ -924,7 +876,6 @@ xpc_mark_partition_active(struct xpc_partition *part) ...@@ -924,7 +876,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
unsigned long irq_flags; unsigned long irq_flags;
enum xpc_retval ret; enum xpc_retval ret;
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags); spin_lock_irqsave(&part->act_lock, irq_flags);
...@@ -940,17 +891,15 @@ xpc_mark_partition_active(struct xpc_partition *part) ...@@ -940,17 +891,15 @@ xpc_mark_partition_active(struct xpc_partition *part)
return ret; return ret;
} }
/* /*
* Notify XPC that the partition is down. * Notify XPC that the partition is down.
*/ */
void void
xpc_deactivate_partition(const int line, struct xpc_partition *part, xpc_deactivate_partition(const int line, struct xpc_partition *part,
enum xpc_retval reason) enum xpc_retval reason)
{ {
unsigned long irq_flags; unsigned long irq_flags;
spin_lock_irqsave(&part->act_lock, irq_flags); spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_INACTIVE) { if (part->act_state == XPC_P_INACTIVE) {
...@@ -964,7 +913,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, ...@@ -964,7 +913,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
} }
if (part->act_state == XPC_P_DEACTIVATING) { if (part->act_state == XPC_P_DEACTIVATING) {
if ((part->reason == xpcUnloading && reason != xpcUnloading) || if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
reason == xpcReactivating) { reason == xpcReactivating) {
XPC_SET_REASON(part, reason, line); XPC_SET_REASON(part, reason, line);
} }
spin_unlock_irqrestore(&part->act_lock, irq_flags); spin_unlock_irqrestore(&part->act_lock, irq_flags);
...@@ -982,9 +931,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, ...@@ -982,9 +931,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
/* set a timelimit on the disengage request */ /* set a timelimit on the disengage request */
part->disengage_request_timeout = jiffies + part->disengage_request_timeout = jiffies +
(xpc_disengage_request_timelimit * HZ); (xpc_disengage_request_timelimit * HZ);
part->disengage_request_timer.expires = part->disengage_request_timer.expires =
part->disengage_request_timeout; part->disengage_request_timeout;
add_timer(&part->disengage_request_timer); add_timer(&part->disengage_request_timer);
} }
...@@ -994,7 +943,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, ...@@ -994,7 +943,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
xpc_partition_going_down(part, reason); xpc_partition_going_down(part, reason);
} }
/* /*
* Mark specified partition as inactive. * Mark specified partition as inactive.
*/ */
...@@ -1003,7 +951,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part) ...@@ -1003,7 +951,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
{ {
unsigned long irq_flags; unsigned long irq_flags;
dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part)); XPC_PARTID(part));
...@@ -1013,7 +960,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part) ...@@ -1013,7 +960,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
part->remote_rp_pa = 0; part->remote_rp_pa = 0;
} }
/* /*
* SAL has provided a partition and machine mask. The partition mask * SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine * contains a bit for each even nasid in our partition. The machine
...@@ -1041,24 +987,22 @@ xpc_discovery(void) ...@@ -1041,24 +987,22 @@ xpc_discovery(void)
u64 *discovered_nasids; u64 *discovered_nasids;
enum xpc_retval ret; enum xpc_retval ret;
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xp_nasid_mask_bytes, xp_nasid_mask_bytes,
GFP_KERNEL, &remote_rp_base); GFP_KERNEL, &remote_rp_base);
if (remote_rp == NULL) { if (remote_rp == NULL)
return; return;
}
remote_vars = (struct xpc_vars *) remote_rp;
remote_vars = (struct xpc_vars *)remote_rp;
discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
GFP_KERNEL); GFP_KERNEL);
if (discovered_nasids == NULL) { if (discovered_nasids == NULL) {
kfree(remote_rp_base); kfree(remote_rp_base);
return; return;
} }
rp = (struct xpc_rsvd_page *) xpc_rsvd_page; rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
/* /*
* The term 'region' in this context refers to the minimum number of * The term 'region' in this context refers to the minimum number of
...@@ -1081,23 +1025,19 @@ xpc_discovery(void) ...@@ -1081,23 +1025,19 @@ xpc_discovery(void)
for (region = 0; region < max_regions; region++) { for (region = 0; region < max_regions; region++) {
if ((volatile int) xpc_exiting) { if (xpc_exiting)
break; break;
}
dev_dbg(xpc_part, "searching region %d\n", region); dev_dbg(xpc_part, "searching region %d\n", region);
for (nasid = (region * region_size * 2); for (nasid = (region * region_size * 2);
nasid < ((region + 1) * region_size * 2); nasid < ((region + 1) * region_size * 2); nasid += 2) {
nasid += 2) {
if ((volatile int) xpc_exiting) { if (xpc_exiting)
break; break;
}
dev_dbg(xpc_part, "checking nasid %d\n", nasid); dev_dbg(xpc_part, "checking nasid %d\n", nasid);
if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
dev_dbg(xpc_part, "PROM indicates Nasid %d is " dev_dbg(xpc_part, "PROM indicates Nasid %d is "
"part of the local partition; skipping " "part of the local partition; skipping "
...@@ -1119,19 +1059,18 @@ xpc_discovery(void) ...@@ -1119,19 +1059,18 @@ xpc_discovery(void)
continue; continue;
} }
/* pull over the reserved page structure */ /* pull over the reserved page structure */
ret = xpc_get_remote_rp(nasid, discovered_nasids, ret = xpc_get_remote_rp(nasid, discovered_nasids,
remote_rp, &remote_rp_pa); remote_rp, &remote_rp_pa);
if (ret != xpcSuccess) { if (ret != xpcSuccess) {
dev_dbg(xpc_part, "unable to get reserved page " dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid, "from nasid %d, reason=%d\n", nasid,
ret); ret);
if (ret == xpcLocalPartid) { if (ret == xpcLocalPartid)
break; break;
}
continue; continue;
} }
...@@ -1140,7 +1079,6 @@ xpc_discovery(void) ...@@ -1140,7 +1079,6 @@ xpc_discovery(void)
partid = remote_rp->partid; partid = remote_rp->partid;
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
/* pull over the cross partition variables */ /* pull over the cross partition variables */
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
...@@ -1171,15 +1109,15 @@ xpc_discovery(void) ...@@ -1171,15 +1109,15 @@ xpc_discovery(void)
* get the same page for remote_act_amos_pa after * get the same page for remote_act_amos_pa after
* module reloads and system reboots. * module reloads and system reboots.
*/ */
if (sn_register_xp_addr_region( if (sn_register_xp_addr_region
remote_vars->amos_page_pa, (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
PAGE_SIZE, 1) < 0) { dev_dbg(xpc_part,
dev_dbg(xpc_part, "partition %d failed to " "partition %d failed to "
"register xp_addr region 0x%016lx\n", "register xp_addr region 0x%016lx\n",
partid, remote_vars->amos_page_pa); partid, remote_vars->amos_page_pa);
XPC_SET_REASON(part, xpcPhysAddrRegFailed, XPC_SET_REASON(part, xpcPhysAddrRegFailed,
__LINE__); __LINE__);
break; break;
} }
...@@ -1195,9 +1133,9 @@ xpc_discovery(void) ...@@ -1195,9 +1133,9 @@ xpc_discovery(void)
remote_vars->act_phys_cpuid); remote_vars->act_phys_cpuid);
if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
version)) { version)) {
part->remote_amos_page_pa = part->remote_amos_page_pa =
remote_vars->amos_page_pa; remote_vars->amos_page_pa;
xpc_mark_partition_disengaged(part); xpc_mark_partition_disengaged(part);
xpc_cancel_partition_disengage_request(part); xpc_cancel_partition_disengage_request(part);
} }
...@@ -1209,7 +1147,6 @@ xpc_discovery(void) ...@@ -1209,7 +1147,6 @@ xpc_discovery(void)
kfree(remote_rp_base); kfree(remote_rp_base);
} }
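
xpc_discovery() probes only the even nasids of each region and bails out of both loops as soon as the module starts exiting. A sketch of that iteration shape; region_size, max_regions and probe() are illustrative stand-ins:

        static volatile int exiting;	/* set when the scan should stop */

        extern void probe(int nasid);	/* stand-in for the per-nasid work */

        static void discover(int max_regions, int region_size)
        {
        	int region, nasid;

        	for (region = 0; region < max_regions; region++) {
        		if (exiting)
        			break;
        		/* nasids are even, so the region spans 2 * region_size */
        		for (nasid = region * region_size * 2;
        		     nasid < (region + 1) * region_size * 2; nasid += 2) {
        			if (exiting)
        				break;
        			probe(nasid);
        		}
        	}
        }
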
/* /*
* Given a partid, get the nasids owned by that partition from the * Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page. * remote partition's reserved page.
...@@ -1221,19 +1158,17 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) ...@@ -1221,19 +1158,17 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
u64 part_nasid_pa; u64 part_nasid_pa;
int bte_res; int bte_res;
part = &xpc_partitions[partid]; part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0) { if (part->remote_rp_pa == 0)
return xpcPartitionDown; return xpcPartitionDown;
}
memset(nasid_mask, 0, XP_NASID_MASK_BYTES); memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
NULL);
return xpc_map_bte_errors(bte_res); return xpc_map_bte_errors(bte_res);
} }
...@@ -3,10 +3,9 @@ ...@@ -3,10 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved.
*/ */
/* /*
* Cross Partition Network Interface (XPNET) support * Cross Partition Network Interface (XPNET) support
* *
...@@ -21,8 +20,8 @@ ...@@ -21,8 +20,8 @@
* *
*/ */
#include <linux/module.h> #include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/ioport.h> #include <linux/ioport.h>
...@@ -36,10 +35,8 @@ ...@@ -36,10 +35,8 @@
#include <asm/sn/bte.h> #include <asm/sn/bte.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_sal.h> #include <asm/sn/sn_sal.h>
#include <asm/types.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/sn/xp.h> #include "xp.h"
/* /*
* The message payload transferred by XPC. * The message payload transferred by XPC.
...@@ -79,7 +76,6 @@ struct xpnet_message { ...@@ -79,7 +76,6 @@ struct xpnet_message {
#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) #define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) #define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) #define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) #define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
...@@ -91,9 +87,9 @@ struct xpnet_message { ...@@ -91,9 +87,9 @@ struct xpnet_message {
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) #define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) #define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ #define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ #define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
#define XPNET_MAGIC 0x88786984 /* "XNET" */ #define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \ #define XPNET_VALID_MSG(_m) \
((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
...@@ -101,7 +97,6 @@ struct xpnet_message { ...@@ -101,7 +97,6 @@ struct xpnet_message {
#define XPNET_DEVICE_NAME "xp0" #define XPNET_DEVICE_NAME "xp0"
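
XPNET packs its protocol version into one byte, major in the high nibble and minor in the low nibble, exactly as the macros above decode it. A runnable sketch of that encoding:

        #include <stdio.h>

        #define MAKE_VERSION(maj, min)	(((maj) << 4) | (min))
        #define VERSION_MAJOR(v)	((v) >> 4)
        #define VERSION_MINOR(v)	((v) & 0xf)

        int main(void)
        {
        	int v = MAKE_VERSION(1, 1);	/* version 1.1 */

        	/* peers interoperate only when the major nibble matches */
        	printf("version %d.%d\n", VERSION_MAJOR(v), VERSION_MINOR(v));
        	return 0;
        }
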
/* /*
* When messages are queued with xpc_send_notify, a kmalloc'd buffer * When messages are queued with xpc_send_notify, a kmalloc'd buffer
* of the following type is passed as a notification cookie. When the * of the following type is passed as a notification cookie. When the
...@@ -145,7 +140,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock); ...@@ -145,7 +140,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/* 32KB has been determined to be the ideal */ /* 32KB has been determined to be the ideal */
#define XPNET_DEF_MTU (0x8000UL) #define XPNET_DEF_MTU (0x8000UL)
/* /*
* The partition id is encapsulated in the MAC address. The following * The partition id is encapsulated in the MAC address. The following
* define locates the octet the partid is in. * define locates the octet the partid is in.
...@@ -153,7 +147,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock); ...@@ -153,7 +147,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
#define XPNET_PARTID_OCTET 1 #define XPNET_PARTID_OCTET 1
#define XPNET_LICENSE_OCTET 2 #define XPNET_LICENSE_OCTET 2
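Note: octet 1 of the interface MAC carries the destination partid (0xff meaning broadcast, 0 being invalid), which is exactly how the transmit path later reads skb->data[XPNET_PARTID_OCTET]. A userspace sketch of that decode; the helper name is hypothetical:

    #include <stdio.h>

    #define XPNET_PARTID_OCTET 1

    /* hypothetical helper: -1 for broadcast, 0 for invalid, else the partid */
    static int xpnet_mac_to_partid(const unsigned char mac[6])
    {
        unsigned char octet = mac[XPNET_PARTID_OCTET];

        if (octet == 0xff)
            return -1;  /* send to every connected partition */
        return octet;   /* 0 is an invalid partid; caller drops it */
    }

    int main(void)
    {
        unsigned char to_part3[6] = { 0x02, 0x03, 0, 0, 0, 0 };
        unsigned char bcast[6]    = { 0x02, 0xff, 0, 0, 0, 0 };

        printf("%d %d\n", xpnet_mac_to_partid(to_part3),
               xpnet_mac_to_partid(bcast)); /* prints "3 -1" */
        return 0;
    }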
/* /*
* Define the XPNET debug device structure that is to be used with dev_dbg(), * Define the XPNET debug device structure that is to be used with dev_dbg(),
* dev_err(), dev_warn(), and dev_info(). * dev_err(), dev_warn(), and dev_info().
...@@ -163,7 +156,7 @@ struct device_driver xpnet_dbg_name = { ...@@ -163,7 +156,7 @@ struct device_driver xpnet_dbg_name = {
}; };
struct device xpnet_dbg_subname = { struct device xpnet_dbg_subname = {
.bus_id = {0}, /* set to "" */ .bus_id = {0}, /* set to "" */
.driver = &xpnet_dbg_name .driver = &xpnet_dbg_name
}; };
...@@ -178,14 +171,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) ...@@ -178,14 +171,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
struct sk_buff *skb; struct sk_buff *skb;
bte_result_t bret; bte_result_t bret;
struct xpnet_dev_private *priv = struct xpnet_dev_private *priv =
(struct xpnet_dev_private *) xpnet_device->priv; (struct xpnet_dev_private *)xpnet_device->priv;
if (!XPNET_VALID_MSG(msg)) { if (!XPNET_VALID_MSG(msg)) {
/* /*
* Packet with a different XPC version. Ignore. * Packet with a different XPC version. Ignore.
*/ */
xpc_received(partid, channel, (void *) msg); xpc_received(partid, channel, (void *)msg);
priv->stats.rx_errors++; priv->stats.rx_errors++;
...@@ -194,14 +186,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) ...@@ -194,14 +186,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore); msg->leadin_ignore, msg->tailout_ignore);
/* reserve an extra cache line */ /* reserve an extra cache line */
skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
if (!skb) { if (!skb) {
dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
msg->size + L1_CACHE_BYTES); msg->size + L1_CACHE_BYTES);
xpc_received(partid, channel, (void *) msg); xpc_received(partid, channel, (void *)msg);
priv->stats.rx_errors++; priv->stats.rx_errors++;
...@@ -227,12 +218,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) ...@@ -227,12 +218,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
* Move the data over from the other side. * Move the data over from the other side.
*/ */
if ((XPNET_VERSION_MINOR(msg->version) == 1) && if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
(msg->embedded_bytes != 0)) { (msg->embedded_bytes != 0)) {
dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
"%lu)\n", skb->data, &msg->data, "%lu)\n", skb->data, &msg->data,
(size_t) msg->embedded_bytes); (size_t)msg->embedded_bytes);
skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes); skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes);
} else { } else {
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
...@@ -244,16 +236,18 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) ...@@ -244,16 +236,18 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bret != BTE_SUCCESS) { if (bret != BTE_SUCCESS) {
// >>> Need better way of cleaning skb. Currently skb /* >>> Need better way of cleaning skb. Currently skb
// >>> appears in_use and we can't just call * >>> appears in_use and we can't just call
// >>> dev_kfree_skb. * >>> dev_kfree_skb. */
dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
"error=0x%x\n", (void *)msg->buf_pa, "error=0x%x\n", (void *)msg->buf_pa,
(void *)__pa((u64)skb->data & (void *)__pa((u64)skb->data &
~(L1_CACHE_BYTES - 1)), ~(L1_CACHE_BYTES - 1)),
msg->size, bret); msg->size, bret);
xpc_received(partid, channel, (void *) msg); xpc_received(partid, channel, (void *)msg);
priv->stats.rx_errors++; priv->stats.rx_errors++;
...@@ -262,7 +256,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) ...@@ -262,7 +256,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
} }
dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n", (void *) skb->head, "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len); skb->len);
...@@ -275,16 +269,14 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) ...@@ -275,16 +269,14 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len); skb_end_pointer(skb), skb->len);
xpnet_device->last_rx = jiffies; xpnet_device->last_rx = jiffies;
priv->stats.rx_packets++; priv->stats.rx_packets++;
priv->stats.rx_bytes += skb->len + ETH_HLEN; priv->stats.rx_bytes += skb->len + ETH_HLEN;
netif_rx_ni(skb); netif_rx_ni(skb);
xpc_received(partid, channel, (void *) msg); xpc_received(partid, channel, (void *)msg);
} }
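Note on the receive path above: there are two delivery modes. For version 1.1 senders that embedded a small payload directly in the XPC message, a plain memcpy into the skb suffices; otherwise the payload is pulled across partitions with bte_copy. A sketch of just that dispatch decision, with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdio.h>

    #define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)

    /* hypothetical helper mirroring the branch in xpnet_receive() */
    static bool use_embedded_copy(unsigned char version,
                                  unsigned short embedded_bytes)
    {
        /* minor-version-1 senders may embed small payloads inline */
        return XPNET_VERSION_MINOR(version) == 1 && embedded_bytes != 0;
    }

    int main(void)
    {
        printf("%d\n", use_embedded_copy(0x11, 512)); /* 1: memcpy path */
        printf("%d\n", use_embedded_copy(0x10, 0));   /* 0: bte_copy path */
        return 0;
    }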
/* /*
* This is the handler which XPC calls during any sort of change in * This is the handler which XPC calls during any sort of change in
* state or message reception on a connection. * state or message reception on a connection.
...@@ -295,20 +287,19 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, ...@@ -295,20 +287,19 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
{ {
long bp; long bp;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(channel != XPC_NET_CHANNEL); DBUG_ON(channel != XPC_NET_CHANNEL);
switch(reason) { switch (reason) {
case xpcMsgReceived: /* message received */ case xpcMsgReceived: /* message received */
DBUG_ON(data == NULL); DBUG_ON(data == NULL);
xpnet_receive(partid, channel, (struct xpnet_message *) data); xpnet_receive(partid, channel, (struct xpnet_message *)data);
break; break;
case xpcConnected: /* connection completed to a partition */ case xpcConnected: /* connection completed to a partition */
spin_lock_bh(&xpnet_broadcast_lock); spin_lock_bh(&xpnet_broadcast_lock);
xpnet_broadcast_partitions |= 1UL << (partid -1 ); xpnet_broadcast_partitions |= 1UL << (partid - 1);
bp = xpnet_broadcast_partitions; bp = xpnet_broadcast_partitions;
spin_unlock_bh(&xpnet_broadcast_lock); spin_unlock_bh(&xpnet_broadcast_lock);
...@@ -321,13 +312,12 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, ...@@ -321,13 +312,12 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
default: default:
spin_lock_bh(&xpnet_broadcast_lock); spin_lock_bh(&xpnet_broadcast_lock);
xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
bp = xpnet_broadcast_partitions; bp = xpnet_broadcast_partitions;
spin_unlock_bh(&xpnet_broadcast_lock); spin_unlock_bh(&xpnet_broadcast_lock);
if (bp == 0) { if (bp == 0)
netif_carrier_off(xpnet_device); netif_carrier_off(xpnet_device);
}
dev_dbg(xpnet, "%s disconnected from partition %d; " dev_dbg(xpnet, "%s disconnected from partition %d; "
"xpnet_broadcast_partitions=0x%lx\n", "xpnet_broadcast_partitions=0x%lx\n",
...@@ -337,13 +327,11 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, ...@@ -337,13 +327,11 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
} }
} }
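Note: xpnet_broadcast_partitions is a one-bit-per-partition mask (bit partid - 1), updated under xpnet_broadcast_lock in the driver; carrier state simply tracks whether the mask is non-empty. A lock-free, single-threaded userspace sketch of the bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t broadcast_partitions;

    static void partition_connected(int partid)
    {
        broadcast_partitions |= 1ULL << (partid - 1);
        /* at least one peer exists: netif_carrier_on() in the driver */
    }

    static void partition_disconnected(int partid)
    {
        broadcast_partitions &= ~(1ULL << (partid - 1));
        if (broadcast_partitions == 0)
            puts("carrier off"); /* netif_carrier_off() in the driver */
    }

    int main(void)
    {
        partition_connected(3);
        partition_connected(5);
        partition_disconnected(3);
        partition_disconnected(5); /* prints "carrier off" */
        printf("mask=0x%llx\n", (unsigned long long)broadcast_partitions);
        return 0;
    }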
static int static int
xpnet_dev_open(struct net_device *dev) xpnet_dev_open(struct net_device *dev)
{ {
enum xpc_retval ret; enum xpc_retval ret;
dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
...@@ -364,7 +352,6 @@ xpnet_dev_open(struct net_device *dev) ...@@ -364,7 +352,6 @@ xpnet_dev_open(struct net_device *dev)
return 0; return 0;
} }
static int static int
xpnet_dev_stop(struct net_device *dev) xpnet_dev_stop(struct net_device *dev)
{ {
...@@ -375,7 +362,6 @@ xpnet_dev_stop(struct net_device *dev) ...@@ -375,7 +362,6 @@ xpnet_dev_stop(struct net_device *dev)
return 0; return 0;
} }
static int static int
xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
{ {
...@@ -392,7 +378,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) ...@@ -392,7 +378,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
return 0; return 0;
} }
/* /*
* Required for the net_device structure. * Required for the net_device structure.
*/ */
...@@ -402,7 +387,6 @@ xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) ...@@ -402,7 +387,6 @@ xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
return 0; return 0;
} }
/* /*
* Return statistics to the caller. * Return statistics to the caller.
*/ */
...@@ -411,13 +395,11 @@ xpnet_dev_get_stats(struct net_device *dev) ...@@ -411,13 +395,11 @@ xpnet_dev_get_stats(struct net_device *dev)
{ {
struct xpnet_dev_private *priv; struct xpnet_dev_private *priv;
priv = (struct xpnet_dev_private *) dev->priv; priv = (struct xpnet_dev_private *)dev->priv;
return &priv->stats; return &priv->stats;
} }
/* /*
* Notification that the other end has received the message and * Notification that the other end has received the message and
* DMA'd the skb information. At this point, they are done with * DMA'd the skb information. At this point, they are done with
...@@ -426,11 +408,9 @@ xpnet_dev_get_stats(struct net_device *dev) ...@@ -426,11 +408,9 @@ xpnet_dev_get_stats(struct net_device *dev)
*/ */
static void static void
xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
void *__qm) void *__qm)
{ {
struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *) __qm; struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
DBUG_ON(queued_msg == NULL); DBUG_ON(queued_msg == NULL);
...@@ -439,14 +419,13 @@ xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, ...@@ -439,14 +419,13 @@ xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
if (atomic_dec_return(&queued_msg->use_count) == 0) { if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_dbg(xpnet, "all acks for skb->head=-x%p\n", dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
(void *) queued_msg->skb->head); (void *)queued_msg->skb->head);
dev_kfree_skb_any(queued_msg->skb); dev_kfree_skb_any(queued_msg->skb);
kfree(queued_msg); kfree(queued_msg);
} }
} }
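Note: the use_count protocol in xpnet_send_completed() is the classic bias-by-one pattern. The count starts at 1 before any sends are issued, each xpc_send_notify adds one, each completion drops one, and the sender drops its own bias at the end, so whoever reaches zero frees the skb regardless of whether completions raced ahead. A compact single-threaded sketch (the real driver uses atomic_dec_return()):

    #include <stdio.h>

    /* stand-in for an atomic counter; single-threaded sketch only */
    static int use_count;

    static void put_msg(const char *who)
    {
        if (--use_count == 0)
            printf("%s frees the skb\n", who);
    }

    int main(void)
    {
        use_count = 1;          /* sender's bias, set before any send */
        use_count++;            /* send to partition A queued */
        use_count++;            /* send to partition B queued */
        put_msg("completion A");
        put_msg("completion B");
        put_msg("sender");      /* bias dropped last: sender frees */
        return 0;
    }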
/* /*
* Network layer has formatted a packet (skb) and is ready to place it * Network layer has formatted a packet (skb) and is ready to place it
* "on the wire". Prepare and send an xpnet_message to all partitions * "on the wire". Prepare and send an xpnet_message to all partitions
...@@ -469,16 +448,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -469,16 +448,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct xpnet_dev_private *priv; struct xpnet_dev_private *priv;
u16 embedded_bytes; u16 embedded_bytes;
priv = (struct xpnet_dev_private *) dev->priv; priv = (struct xpnet_dev_private *)dev->priv;
dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n", (void *) skb->head, "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len); skb->len);
/* /*
* The xpnet_pending_msg tracks how many outstanding * The xpnet_pending_msg tracks how many outstanding
* xpc_send_notifies are relying on this skb. When none * xpc_send_notifies are relying on this skb. When none
...@@ -487,16 +463,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -487,16 +463,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
if (queued_msg == NULL) { if (queued_msg == NULL) {
dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
"packet\n", sizeof(struct xpnet_pending_msg)); "packet\n", sizeof(struct xpnet_pending_msg));
priv->stats.tx_errors++; priv->stats.tx_errors++;
return -ENOMEM; return -ENOMEM;
} }
/* get the beginning of the first cacheline and end of last */ /* get the beginning of the first cacheline and end of last */
start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
/* calculate how many bytes to embed in the XPC message */ /* calculate how many bytes to embed in the XPC message */
...@@ -506,7 +481,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -506,7 +481,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
embedded_bytes = skb->len; embedded_bytes = skb->len;
} }
/* /*
* Since the send occurs asynchronously, we set the count to one * Since the send occurs asynchronously, we set the count to one
* and begin sending. Any sends that happen to complete before * and begin sending. Any sends that happen to complete before
...@@ -517,14 +491,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -517,14 +491,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
atomic_set(&queued_msg->use_count, 1); atomic_set(&queued_msg->use_count, 1);
queued_msg->skb = skb; queued_msg->skb = skb;
second_mac_octet = skb->data[XPNET_PARTID_OCTET]; second_mac_octet = skb->data[XPNET_PARTID_OCTET];
if (second_mac_octet == 0xff) { if (second_mac_octet == 0xff) {
/* we are being asked to broadcast to all partitions */ /* we are being asked to broadcast to all partitions */
dp = xpnet_broadcast_partitions; dp = xpnet_broadcast_partitions;
} else if (second_mac_octet != 0) { } else if (second_mac_octet != 0) {
dp = xpnet_broadcast_partitions & dp = xpnet_broadcast_partitions &
(1UL << (second_mac_octet - 1)); (1UL << (second_mac_octet - 1));
} else { } else {
/* 0 is an invalid partid. Ignore */ /* 0 is an invalid partid. Ignore */
dp = 0; dp = 0;
...@@ -543,7 +516,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -543,7 +516,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
dest_partid++) { dest_partid++) {
if (!(dp & (1UL << (dest_partid - 1)))) { if (!(dp & (1UL << (dest_partid - 1)))) {
/* not destined for this partition */ /* not destined for this partition */
continue; continue;
...@@ -552,20 +524,18 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -552,20 +524,18 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* remove this partition from the destinations mask */ /* remove this partition from the destinations mask */
dp &= ~(1UL << (dest_partid - 1)); dp &= ~(1UL << (dest_partid - 1));
/* found a partition to send to */ /* found a partition to send to */
ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
XPC_NOWAIT, (void **)&msg); XPC_NOWAIT, (void **)&msg);
if (unlikely(ret != xpcSuccess)) { if (unlikely(ret != xpcSuccess))
continue; continue;
}
msg->embedded_bytes = embedded_bytes; msg->embedded_bytes = embedded_bytes;
if (unlikely(embedded_bytes != 0)) { if (unlikely(embedded_bytes != 0)) {
msg->version = XPNET_VERSION_EMBED; msg->version = XPNET_VERSION_EMBED;
dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
&msg->data, skb->data, (size_t) embedded_bytes); &msg->data, skb->data, (size_t)embedded_bytes);
skb_copy_from_linear_data(skb, &msg->data, skb_copy_from_linear_data(skb, &msg->data,
(size_t)embedded_bytes); (size_t)embedded_bytes);
} else { } else {
...@@ -573,7 +543,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -573,7 +543,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
} }
msg->magic = XPNET_MAGIC; msg->magic = XPNET_MAGIC;
msg->size = end_addr - start_addr; msg->size = end_addr - start_addr;
msg->leadin_ignore = (u64) skb->data - start_addr; msg->leadin_ignore = (u64)skb->data - start_addr;
msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
msg->buf_pa = __pa(start_addr); msg->buf_pa = __pa(start_addr);
...@@ -583,7 +553,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -583,7 +553,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore); msg->leadin_ignore, msg->tailout_ignore);
atomic_inc(&queued_msg->use_count); atomic_inc(&queued_msg->use_count);
ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
...@@ -592,14 +561,12 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -592,14 +561,12 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
atomic_dec(&queued_msg->use_count); atomic_dec(&queued_msg->use_count);
continue; continue;
} }
} }
if (atomic_dec_return(&queued_msg->use_count) == 0) { if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_dbg(xpnet, "no partitions to receive packet destined for " dev_dbg(xpnet, "no partitions to receive packet destined for "
"%d\n", dest_partid); "%d\n", dest_partid);
dev_kfree_skb(skb); dev_kfree_skb(skb);
kfree(queued_msg); kfree(queued_msg);
} }
...@@ -610,23 +577,20 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -610,23 +577,20 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0; return 0;
} }
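Note: because the BTE transfers whole cachelines, the transmit path above widens the skb to cacheline boundaries and tells the receiver how many leading and trailing bytes to discard (leadin_ignore, tailout_ignore). A worked example of that arithmetic; the addresses are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define L1_CACHE_BYTES 128UL /* assumed sn2 cacheline size */
    #define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

    int main(void)
    {
        uint64_t data = 0x6000000000001048; /* invented skb->data */
        uint64_t tail = data + 1500;        /* invented skb_tail_pointer() */

        uint64_t start_addr = data & ~(L1_CACHE_BYTES - 1);
        uint64_t end_addr = L1_CACHE_ALIGN(tail);

        /* size=1664 leadin=72 tailout=92: the receiver trims both ends */
        printf("size=%lu leadin=%lu tailout=%lu\n",
               (unsigned long)(end_addr - start_addr),
               (unsigned long)(data - start_addr),
               (unsigned long)(end_addr - tail));
        return 0;
    }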
/* /*
* Deal with transmit timeouts coming from the network layer. * Deal with transmit timeouts coming from the network layer.
*/ */
static void static void
xpnet_dev_tx_timeout (struct net_device *dev) xpnet_dev_tx_timeout(struct net_device *dev)
{ {
struct xpnet_dev_private *priv; struct xpnet_dev_private *priv;
priv = (struct xpnet_dev_private *) dev->priv; priv = (struct xpnet_dev_private *)dev->priv;
priv->stats.tx_errors++; priv->stats.tx_errors++;
return; return;
} }
static int __init static int __init
xpnet_init(void) xpnet_init(void)
{ {
...@@ -634,10 +598,8 @@ xpnet_init(void) ...@@ -634,10 +598,8 @@ xpnet_init(void)
u32 license_num; u32 license_num;
int result = -ENOMEM; int result = -ENOMEM;
if (!ia64_platform_is("sn2")) { if (!ia64_platform_is("sn2"))
return -ENODEV; return -ENODEV;
}
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
...@@ -647,9 +609,8 @@ xpnet_init(void) ...@@ -647,9 +609,8 @@ xpnet_init(void)
*/ */
xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
XPNET_DEVICE_NAME, ether_setup); XPNET_DEVICE_NAME, ether_setup);
if (xpnet_device == NULL) { if (xpnet_device == NULL)
return -ENOMEM; return -ENOMEM;
}
netif_carrier_off(xpnet_device); netif_carrier_off(xpnet_device);
...@@ -672,7 +633,7 @@ xpnet_init(void) ...@@ -672,7 +633,7 @@ xpnet_init(void)
license_num = sn_partition_serial_number_val(); license_num = sn_partition_serial_number_val();
for (i = 3; i >= 0; i--) { for (i = 3; i >= 0; i--) {
xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
license_num & 0xff; license_num & 0xff;
license_num = license_num >> 8; license_num = license_num >> 8;
} }
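Note: the loop above serializes the 32-bit partition serial number into MAC octets 2 through 5, most significant byte first. An equivalent standalone sketch, with an invented serial number:

    #include <stdint.h>
    #include <stdio.h>

    #define XPNET_LICENSE_OCTET 2

    int main(void)
    {
        unsigned char dev_addr[6] = { 0 };
        uint32_t license_num = 0x12345678; /* invented serial number */
        int i;

        for (i = 3; i >= 0; i--) {
            dev_addr[XPNET_LICENSE_OCTET + i] = license_num & 0xff;
            license_num >>= 8;
        }

        /* octets 2..5 end up 12:34:56:78, i.e. big-endian order */
        printf("%02x:%02x:%02x:%02x\n",
               dev_addr[2], dev_addr[3], dev_addr[4], dev_addr[5]);
        return 0;
    }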
...@@ -690,29 +651,27 @@ xpnet_init(void) ...@@ -690,29 +651,27 @@ xpnet_init(void)
xpnet_device->features = NETIF_F_NO_CSUM; xpnet_device->features = NETIF_F_NO_CSUM;
result = register_netdev(xpnet_device); result = register_netdev(xpnet_device);
if (result != 0) { if (result != 0)
free_netdev(xpnet_device); free_netdev(xpnet_device);
}
return result; return result;
} }
module_init(xpnet_init); module_init(xpnet_init);
static void __exit static void __exit
xpnet_exit(void) xpnet_exit(void)
{ {
dev_info(xpnet, "unregistering network device %s\n", dev_info(xpnet, "unregistering network device %s\n",
xpnet_device[0].name); xpnet_device[0].name);
unregister_netdev(xpnet_device); unregister_netdev(xpnet_device);
free_netdev(xpnet_device); free_netdev(xpnet_device);
} }
module_exit(xpnet_exit); module_exit(xpnet_exit);
MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...) ...@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
struct ia64_mca_notify_die { struct ia64_mca_notify_die {
struct ia64_sal_os_state *sos; struct ia64_sal_os_state *sos;
int *monarch_cpu; int *monarch_cpu;
int *data;
}; };
DECLARE_PER_CPU(u64, ia64_mca_pal_base); DECLARE_PER_CPU(u64, ia64_mca_pal_base);
......