Commit 3801c3a6 authored by David Mosberger, committed by David Mosberger

Many files:

  ia64: sn2 update
.del-sv.h~583ade34a48fc2a0:
  Delete: include/asm-ia64/sn/sv.h
.del-sv.c~37c4d6a1e76bdd1d:
  Delete: arch/ia64/sn/kernel/sv.c
parent 765a8447
......@@ -758,28 +758,6 @@ hwgraph_info_unexport_LBL(vertex_hdl_t de, char *name)
return(rc);
}
/*
* hwgraph_path_lookup - return the handle for the given path.
*
*/
int
hwgraph_path_lookup(vertex_hdl_t start_vertex_handle,
char *lookup_path,
vertex_hdl_t *vertex_handle_ptr,
char **remainder)
{
*vertex_handle_ptr = hwgfs_find_handle(start_vertex_handle, /* start dir */
lookup_path, /* path */
0, /* major */
0, /* minor */
0, /* char | block */
1); /* traverse symlinks */
if (*vertex_handle_ptr == NULL)
return(-1);
else
return(0);
}
/*
* hwgraph_traverse - Find and return the handle starting from de.
*
......
......@@ -69,6 +69,7 @@ walk_parents_mkdir(
return error;
nd->dentry = lookup_create(nd, is_dir);
nd->flags |= LOOKUP_PARENT;
if (unlikely(IS_ERR(nd->dentry)))
return PTR_ERR(nd->dentry);
......
......@@ -87,11 +87,7 @@ hub_pio_init(vertex_hdl_t hubv)
hub_set_piomode(nasid, HUB_PIO_CONVEYOR);
mutex_spinlock_init(&hubinfo->h_bwlock);
/*
* If this lock can be acquired from interrupts or bh's, add SV_INTS or SV_BHS,
* respectively, to the flags here.
*/
sv_init(&hubinfo->h_bwwait, &hubinfo->h_bwlock, SV_ORDER_FIFO | SV_MON_SPIN);
init_waitqueue_head(&hubinfo->h_bwwait);
}
/*
......@@ -215,10 +211,16 @@ hub_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
if (flags & PIOMAP_NOSLEEP) {
bw_piomap = NULL;
goto done;
} else {
DECLARE_WAITQUEUE(wait, current);
spin_unlock(&hubinfo->h_bwlock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&hubinfo->h_bwwait, &wait);
schedule();
remove_wait_queue(&hubinfo->h_bwwait, &wait);
goto tryagain;
}
sv_wait(&hubinfo->h_bwwait, 0, 0);
goto tryagain;
}
}
......@@ -316,7 +318,7 @@ hub_piomap_free(hub_piomap_t hub_piomap)
} else
hub_piomap->hpio_flags &= ~HUB_PIOMAP_IS_VALID;
(void)sv_signal(&hubinfo->h_bwwait);
wake_up(&hubinfo->h_bwwait);
}
mutex_spinunlock(&hubinfo->h_bwlock, s);
......
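The two hunks above carry the core of this change: hubinfo's sv_t becomes a plain wait_queue_head_t, the sv_wait() call in hub_piomap_alloc() becomes an open-coded exclusive sleep, and the sv_signal() in hub_piomap_free() becomes wake_up(). Below is a minimal sketch of that generic sleep-and-retry pattern using the 2.5-era wait-queue API; the resource_busy flag and the two helper functions are hypothetical, only h_bwwait and h_bwlock correspond to fields in the diff, and the sketch registers the waiter before dropping the lock rather than copying the hunk line for line.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static spinlock_t h_bwlock = SPIN_LOCK_UNLOCKED;   /* guards the shared resource */
static DECLARE_WAIT_QUEUE_HEAD(h_bwwait);          /* tasks waiting for it */
static int resource_busy;                          /* hypothetical state */

static void grab_resource(void)                    /* hypothetical helper */
{
        DECLARE_WAITQUEUE(wait, current);

tryagain:
        spin_lock(&h_bwlock);
        if (!resource_busy) {
                resource_busy = 1;                 /* claimed it */
                spin_unlock(&h_bwlock);
                return;
        }
        /* Busy: queue ourselves and set our state before dropping the
         * lock, so a wake_up() issued in between is not missed. */
        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue_exclusive(&h_bwwait, &wait);
        spin_unlock(&h_bwlock);
        schedule();
        remove_wait_queue(&h_bwwait, &wait);
        goto tryagain;                             /* re-check under the lock */
}

static void put_resource(void)                     /* hypothetical helper */
{
        spin_lock(&h_bwlock);
        resource_busy = 0;
        wake_up(&h_bwwait);                        /* replaces sv_signal() */
        spin_unlock(&h_bwlock);
}

The retry loop is needed because another task may claim the resource between the wake-up and the re-check, which is why the hunk above jumps back to tryagain after waking.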
......@@ -474,11 +474,6 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
*/
if (bad) {
pcibr_unlock(pcibr_soft, s);
#ifdef PIC_LATER
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
"pcibr_try_set_device: mod blocked by %x\n",
bad, device_bits));
#endif
return bad;
}
}
......@@ -519,13 +514,7 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
}
pcibr_unlock(pcibr_soft, s);
#ifdef PIC_LATER
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
"pcibr_try_set_device: Device(%d): %x\n",
slot, new, device_bits));
#else
printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
#endif
return 0;
}
......@@ -824,14 +813,7 @@ pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
#ifdef PIC_LATER
/* This may be a loadable driver so lock out any pciconfig actions */
mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
#endif
pcibr_info->f_att_det_error = error;
pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
if (error) {
......@@ -839,11 +821,6 @@ pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
} else {
pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
}
#ifdef PIC_LATER
/* Release the bus lock */
mrunlock(pcibr_soft->bs_bus_lock);
#endif
}
/*
......@@ -875,14 +852,7 @@ pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
#ifdef PIC_LATER
/* This may be a loadable driver so lock out any pciconfig actions */
mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
#endif
pcibr_info->f_att_det_error = error;
pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
if (error) {
......@@ -890,11 +860,6 @@ pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
} else {
pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
}
#ifdef PIC_LATER
/* Release the bus lock */
mrunlock(pcibr_soft->bs_bus_lock);
#endif
}
/*
......@@ -1245,9 +1210,6 @@ pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
* Initialize bridge and bus locks
*/
spin_lock_init(&pcibr_soft->bs_lock);
#ifdef PIC_LATER
mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
#endif
/*
* If we have one, process the hints structure.
*/
......@@ -2250,17 +2212,10 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
* arguments fails so sprintf() it into a temporary string.
*/
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
"slot %d allocates DevIO(%d) Device(%d) set to %x\n",
space, space_desc, pci_addr, pci_addr + req_size - 1,
slot, win, win, devreg, device_bits);
#else
sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
"slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
(unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
(unsigned int)slot, win, win, (unsigned long)devreg);
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
goto done;
......@@ -2291,11 +2246,6 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
pcibr_info->f_window[bar].w_devio_index = win;
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
"slot %d uses DevIO(%d)\n", space, space_desc, pci_addr,
pci_addr + req_size - 1, slot, win);
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
goto done;
......@@ -2392,14 +2342,6 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
;
} else if (bfo != 0) { /* we have a conflict. */
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap conflict in %x, "
"was%s%s, want%s%s\n", space, space_desc,
bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
xio_addr = XIO_NOWHERE;
......@@ -2432,12 +2374,6 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
*bfp = bfn; /* record the assignment */
if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap for %x set "
"to%s%s\n", space, space_desc,
bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
#endif
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
}
}
......@@ -2740,10 +2676,6 @@ pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
size_t req_size)
{
pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
#ifdef PIC_LATER
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
#endif
pciio_piospace_t piosp;
unsigned long s;
char name[1024];
......@@ -3395,10 +3327,6 @@ pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
{
#ifdef PIC_LATER
pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
#endif
/*
* We could go through and invalidate ATEs here;
* for performance reasons, we don't.
......@@ -3719,72 +3647,8 @@ pcibr_provider_shutdown(vertex_hdl_t pcibr)
int
pcibr_reset(vertex_hdl_t conn)
{
#ifdef PIC_LATER
pciio_info_t pciio_info = pciio_info_get(conn);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t ctlreg;
unsigned cfgctl[8];
unsigned long s;
int f, nf;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
int win;
int error = 0;
#endif /* PIC_LATER */
BUG();
#ifdef PIC_LATER
if (pcibr_soft->bs_slot[pciio_slot].has_host) {
pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
}
if ((pciio_slot >= pcibr_soft->bs_first_slot) &&
(pciio_slot <= pcibr_soft->bs_last_reset)) {
s = pcibr_lock(pcibr_soft);
nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
for (f = 0; f < nf; ++f)
if (pcibr_infoh[f])
cfgctl[f] = pcibr_func_config_get(bridge, pciio_slot, f,
PCI_CFG_COMMAND/4);
error = iobrick_pci_slot_rst(pcibr_soft->bs_l1sc,
pcibr_widget_to_bus(pcibr_soft->bs_vhdl),
PCIBR_DEVICE_TO_SLOT(pcibr_soft,pciio_slot),
NULL);
ctlreg = bridge->b_wid_control;
bridge->b_wid_control = ctlreg & ~BRIDGE_CTRL_RST_PIN(pciio_slot);
nano_delay(&ts);
bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST_PIN(pciio_slot);
nano_delay(&ts);
for (f = 0; f < nf; ++f)
if ((pcibr_info = pcibr_infoh[f]))
for (win = 0; win < 6; ++win)
if (pcibr_info->f_window[win].w_base != 0)
pcibr_func_config_set(bridge, pciio_slot, f,
PCI_CFG_BASE_ADDR(win) / 4,
pcibr_info->f_window[win].w_base);
for (f = 0; f < nf; ++f)
if (pcibr_infoh[f])
pcibr_func_config_set(bridge, pciio_slot, f,
PCI_CFG_COMMAND / 4,
cfgctl[f]);
pcibr_unlock(pcibr_soft, s);
if (error)
return(-1);
return 0;
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, conn,
"pcibr_reset unimplemented for slot %d\n", conn, pciio_slot));
#endif /* PIC_LATER */
return -1;
return -1;
}
pciio_endian_t
......@@ -3836,13 +3700,7 @@ pcibr_endian_set(vertex_hdl_t pconn_vhdl,
}
pcibr_unlock(pcibr_soft, s);
#ifdef PIC_LATER
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
"pcibr_endian_set: Device(%d): %x\n",
pciio_slot, devreg, device_bits));
#else
printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
#endif
return desired_end;
}
......@@ -4026,13 +3884,7 @@ pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
}
}
pcibr_unlock(pcibr_soft, s);
#ifdef PIC_LATER
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
"pcibr_device_flags_set: Device(%d): %x\n",
pciio_slot, devreg, device_bits));
#else
printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
#endif
}
return (1);
}
......
......@@ -30,40 +30,8 @@
*/
#include <asm/sn/xtalk/xbow.h>
#define DEV_FUNC(dev,func) xbow_##func
#if !defined(DEV_FUNC)
/*
* There is more than one possible provider
* for this platform. We need to examine the
* master vertex of the current vertex for
* a provider function structure, and indirect
* through the appropriately named member.
*/
#define DEV_FUNC(dev,func) xwidget_to_provider_fns(dev)->func
static xswitch_provider_t *
xwidget_to_provider_fns(vertex_hdl_t xconn)
{
vertex_hdl_t busv;
xswitch_info_t xswitch_info;
xswitch_provider_t provider_fns;
busv = hwgraph_connectpt_get(xconn_vhdl);
ASSERT(busv != GRAPH_VERTEX_NONE);
xswitch_info = xswitch_info_get(busv);
ASSERT(xswitch_info != NULL);
provider_fns = xswitch_info->xswitch_fns;
ASSERT(provider_fns != NULL);
return provider_fns;
}
#endif
#define XSWITCH_CENSUS_BIT(port) (1<<(port))
#define XSWITCH_CENSUS_PORT_MIN (0x0)
#define XSWITCH_CENSUS_PORT_MAX (0xF)
#define XSWITCH_CENSUS_PORTS (0x10)
#define XSWITCH_WIDGET_PRESENT(infop,port) ((infop)->census & XSWITCH_CENSUS_BIT(port))
......@@ -94,28 +62,20 @@ xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
vertex_hdl_t xwidget)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return;
xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN] = xwidget;
xswitch_info->vhdl[port] = xwidget;
}
vertex_hdl_t
xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return GRAPH_VERTEX_NONE;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return GRAPH_VERTEX_NONE;
return xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN];
return xswitch_info->vhdl[port];
}
/*
......@@ -128,28 +88,20 @@ xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
vertex_hdl_t master_vhdl)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return;
xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN] = master_vhdl;
xswitch_info->master_vhdl[port] = master_vhdl;
}
vertex_hdl_t
xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return GRAPH_VERTEX_NONE;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return GRAPH_VERTEX_NONE;
return xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN];
return xswitch_info->master_vhdl[port];
}
void
......@@ -170,9 +122,7 @@ xswitch_info_new(vertex_hdl_t xwidget)
NEW(xswitch_info);
xswitch_info->census = 0;
for (port = XSWITCH_CENSUS_PORT_MIN;
port <= XSWITCH_CENSUS_PORT_MAX;
port++) {
for (port = 0; port <= XSWITCH_CENSUS_PORT_MAX; port++) {
xswitch_info_vhdl_set(xswitch_info, port,
GRAPH_VERTEX_NONE);
......@@ -204,11 +154,6 @@ xswitch_info_link_is_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
int
xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return 0;
#endif
if (port > XSWITCH_CENSUS_PORT_MAX)
return 0;
......@@ -218,6 +163,5 @@ xswitch_info_link_ok(xswitch_info_t xswitch_info, xwidgetnum_t port)
int
xswitch_reset_link(vertex_hdl_t xconn_vhdl)
{
return DEV_FUNC(xconn_vhdl, reset_link)
(xconn_vhdl);
return xbow_reset_link(xconn_vhdl);
}
......@@ -9,8 +9,6 @@
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y := probe.o setup.o sv.o bte.o irq.o mca.o \
idle.o sn2/
obj-y += probe.o setup.o bte.o irq.o mca.o idle.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o
......@@ -121,14 +121,17 @@ sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
static void
sn_cpei_timer_handler(unsigned long dummy) {
sn_cpei_timer_handler(unsigned long dummy)
{
sn_cpei_handler(-1, NULL, NULL);
mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
}
void
sn_init_cpei_timer() {
sn_init_cpei_timer(void)
{
init_timer(&sn_cpei_timer);
sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
sn_cpei_timer.function = sn_cpei_timer_handler;
add_timer(&sn_cpei_timer);
add_timer(&sn_cpei_timer);
}
......@@ -9,8 +9,6 @@
#ifndef _ASM_IA64_SN_DMAMAP_H
#define _ASM_IA64_SN_DMAMAP_H
#include <asm/sn/sv.h>
#ifdef __cplusplus
extern "C" {
#endif
......@@ -66,8 +64,6 @@ extern struct map *a32map[];
extern int a24_mapsize;
extern int a32_mapsize;
extern sv_t dmamapout;
#ifdef __cplusplus
}
#endif
......
......@@ -99,7 +99,6 @@ extern int hwgraph_info_replace_LBL(vertex_hdl_t, char *, arbitrary_info_t,
extern int hwgraph_info_get_exported_LBL(vertex_hdl_t, char *, int *, arbitrary_info_t *);
extern int hwgraph_info_get_next_LBL(vertex_hdl_t, char *, arbitrary_info_t *,
labelcl_info_place_t *);
extern int hwgraph_path_lookup(vertex_hdl_t, char *, vertex_hdl_t *, char **);
extern int hwgraph_info_export_LBL(vertex_hdl_t, char *, int);
extern int hwgraph_info_unexport_LBL(vertex_hdl_t, char *);
extern int hwgraph_info_remove_LBL(vertex_hdl_t, char *, arbitrary_info_t *);
......
......@@ -14,7 +14,6 @@
#include <asm/sn/vector.h>
#include <asm/sn/addrs.h>
#include <asm/atomic.h>
#include <asm/sn/sv.h>
/* L1 Target Addresses */
/*
......
......@@ -127,8 +127,7 @@ typedef struct irqpda_s irqpda_t;
* Check if given a compact node id the corresponding node has all the
* cpus disabled.
*/
#define is_headless_node(cnode) ((cnode == CNODEID_NONE) || \
(node_data(cnode)->active_cpu_count == 0))
#define is_headless_node(cnode) (!test_bit(cnode, &node_has_active_cpus))
/*
* Check if given a node vertex handle the corresponding node has all the
......
......@@ -97,7 +97,6 @@ extern void setup_replication_mask(int maxnodes);
/* init.c */
extern cnodeid_t get_compact_nodeid(void); /* get compact node id */
extern void init_platform_nodepda(nodepda_t *npda, cnodeid_t node);
extern void per_cpu_init(void);
extern int is_fine_dirmode(void);
extern void update_node_information(cnodeid_t);
......@@ -177,7 +176,7 @@ typedef struct hubinfo_s {
/* structures for PIO management */
xwidgetnum_t h_widgetid; /* my widget # (as viewed from xbow) */
struct hub_piomap_s h_small_window_piomap[HUB_WIDGET_ID_MAX+1];
sv_t h_bwwait; /* wait for big window to free */
wait_queue_head_t h_bwwait; /* wait for big window to free */
spinlock_t h_bwlock; /* guard big window piomap's */
spinlock_t h_crblock; /* guard CRB error handling */
int h_num_big_window_fixed; /* count number of FIXED maps */
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This implementation of synchronization variables is heavily based on
* one done by Steve Lord <lord@sgi.com>
*
* Paul Cassella <pwc@sgi.com>
*/
#ifndef _ASM_IA64_SN_SV_H
#define _ASM_IA64_SN_SV_H
#include <linux/spinlock.h>
#include <asm/semaphore.h>
#ifndef ASSERT
#define ASSERT(x) do { \
if(!(x)) { \
printk(KERN_ERR "%s\n", "Assertion failed: " # x); \
BUG(); \
} \
} while(0)
#define _SV_ASSERT
#endif
typedef void sv_mon_lock_t;
typedef void (*sv_mon_unlock_func_t)(sv_mon_lock_t *lock);
/* sv_flags values: */
#define SV_ORDER_FIFO 0x001
#define SV_ORDER_FILO 0x002
#define SV_ORDER_LIFO SV_ORDER_FILO
/* If at some point one order becomes preferable to others, we can
switch to it if the caller of sv_init doesn't specify. */
#define SV_ORDER_DEFAULT SV_ORDER_FIFO
#define SV_ORDER_MASK 0x00f
#define SV_MON_SEMA 0x010
#define SV_MON_SPIN 0x020
#define SV_MON_MASK 0x0f0
/*
If the monitor lock can be acquired from interrupts. Note that this
is a superset of the cases in which the sv can be touched from
interrupts.
This is currently only valid when the monitor lock is a spinlock.
If this is used, sv_wait, sv_signal, and sv_broadcast must all be
called with interrupts disabled, which has to happen anyway to have
acquired the monitor spinlock.
*/
#define SV_INTS 0x100
/* ditto for bottom halves */
#define SV_BHS 0x200
/* sv_wait_flag values: */
#define SV_WAIT_SIG 0x001 /* Allow sv_wait to be interrupted by a signal */
typedef struct sv_s {
wait_queue_head_t sv_waiters;
sv_mon_lock_t *sv_mon_lock; /* Lock held for exclusive access to monitor. */
sv_mon_unlock_func_t sv_mon_unlock_func;
spinlock_t sv_lock; /* Spinlock protecting the sv itself. */
int sv_flags;
} sv_t;
#define DECLARE_SYNC_VARIABLE(sv, l, f) sv_t sv = sv_init(&sv, l, f)
/*
* @sv the sync variable to initialize
* @monitor_lock the lock enforcing exclusive running in the monitor
* @flags one of
* SV_MON_SEMA monitor_lock is a semaphore
* SV_MON_SPIN monitor_lock is a spinlock
* and a bitwise or of some subset of
* SV_INTS - the monitor lock can be acquired from interrupts (and
* hence, whenever we hold it, interrupts are disabled or
* we're in an interrupt.) This is only valid when
* SV_MON_SPIN is set.
*/
void sv_init(sv_t *sv, sv_mon_lock_t *monitor_lock, int flags);
/*
* Set SV_WAIT_SIG in sv_wait_flags to let the sv_wait be interrupted by signals.
*
* timeout is how long to wait before giving up, or 0 to wait
* indefinitely. It is given in jiffies, and is relative.
*
* The associated lock must be locked on entry. It is unlocked on return.
*
* Return values:
*
* n < 0 : interrupted, -n jiffies remaining on timeout, or -1 if timeout == 0
* n = 0 : timeout expired
* n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
*/
extern signed long sv_wait(sv_t *sv, int sv_wait_flags,
unsigned long timeout /* relative jiffies */);
static inline int sv_wait_compat(sv_t *sv, sv_mon_lock_t *lock, int sv_wait_flags,
unsigned long timeout, int sv_mon_type)
{
ASSERT(sv_mon_type == (sv->sv_flags & SV_MON_MASK));
if(sv->sv_mon_lock)
ASSERT(lock == sv->sv_mon_lock);
else
sv->sv_mon_lock = lock;
return sv_wait(sv, sv_wait_flags, timeout);
}
/* These work like Irix's sv_wait() and sv_wait_sig(), except the
caller must call the one corresponding to the type of the monitor
lock. */
#define sv_spin_wait(sv, lock) \
sv_wait_compat(sv, lock, 0, 0, SV_MON_SPIN)
#define sv_spin_wait_sig(sv, lock) \
sv_wait_compat(sv, lock, SV_WAIT_SIG, 0, SV_MON_SPIN)
#define sv_sema_wait(sv, lock) \
sv_wait_compat(sv, lock, 0, 0, SV_MON_SEMA)
#define sv_sema_wait_sig(sv, lock) \
sv_wait_compat(sv, lock, SV_WAIT_SIG, 0, SV_MON_SEMA)
/* These work as in Irix. */
void sv_signal(sv_t *sv);
void sv_broadcast(sv_t *sv);
/* This works as in Irix. */
void sv_destroy(sv_t *sv);
#ifdef _SV_ASSERT
#undef ASSERT
#undef _SV_ASSERT
#endif
#endif /* _ASM_IA64_SN_SV_H */
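The comments in this header (deleted by the commit) document the intended calling convention: the caller takes the monitor lock, calls sv_wait(), which drops that lock and sleeps, and the waking side calls sv_signal() or sv_broadcast() under the same lock. A minimal usage sketch against that API follows, assuming a spinlock monitor; monitor, event_pending, and the three functions are illustrative names, not code from the tree.

#include <linux/spinlock.h>
#include <asm/sn/sv.h>                     /* the header shown above */

static spinlock_t monitor = SPIN_LOCK_UNLOCKED;
static sv_t event_sv;
static int event_pending;

static void event_sv_setup(void)
{
        /* FIFO wakeup order, spinlock monitor; per the comments above,
         * SV_INTS would be OR'd in if the monitor lock were also taken
         * from interrupt context. */
        sv_init(&event_sv, &monitor, SV_ORDER_FIFO | SV_MON_SPIN);
}

static void consumer_wait(void)
{
        spin_lock(&monitor);
        while (!event_pending) {
                sv_wait(&event_sv, 0, 0);  /* drops monitor, sleeps, no timeout */
                spin_lock(&monitor);       /* sv_wait() returns unlocked */
        }
        event_pending = 0;
        spin_unlock(&monitor);
}

static void producer_post(void)
{
        spin_lock(&monitor);
        event_pending = 1;
        sv_signal(&event_sv);              /* wake one waiter */
        spin_unlock(&monitor);
}

Seen this way, the conversion in this commit is largely mechanical: the monitor spinlock stays, the sv_t collapses to the wait_queue_head_t it already wraps, and sv_wait()/sv_signal() become the open-coded sleep and wake_up() shown in the hub PIO hunks earlier in the diff.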