Commit 96d333c6 authored by Linus Torvalds's avatar Linus Torvalds

Import 2.3.99pre4-4

parent 0239d6ee
BOOKS := wanbook.sgml z8530book.sgml mcabook.sgml videobook.sgml kernel-api.sgml
BOOKS := wanbook.sgml z8530book.sgml mcabook.sgml videobook.sgml kernel-api.sgml parportbook.sgml
PS := $(patsubst %.sgml, %.ps, $(BOOKS))
PDF := $(patsubst %.sgml, %.pdf, $(BOOKS))
......@@ -47,6 +47,10 @@ kernel-api.sgml: kernel-api.tmpl
$(TOPDIR)/net/netsyms.c \
<kernel-api.tmpl >kernel-api.sgml
parportbook.sgml: parportbook.tmpl
$(TOPDIR)/scripts/docgen $(TOPDIR)/drivers/parport/init.c \
<parportbook.tmpl >parportbook.sgml
DVI := $(patsubst %.sgml, %.dvi, $(BOOKS))
AUX := $(patsubst %.sgml, %.aux, $(BOOKS))
TEX := $(patsubst %.sgml, %.tex, $(BOOKS))
......
--- drivers/usb/usb-storage.c.orig Thu Mar 30 20:31:08 2000
+++ drivers/usb/usb-storage.c Sun Apr 2 15:20:21 2000
@@ -1417,6 +1418,9 @@
/* lock the device pointers */
spin_lock_irqsave(&(us->dev_spinlock), flags);
+ /* lock the device pointers */
+ spin_lock_irqsave(&(us->dev_spinlock), flags);
+
/* our device has gone - pretend not ready */
/* FIXME: we also need to handle INQUIRY here,
* probably */
@@ -1862,6 +1866,9 @@
US_DEBUGP("-- device was not in use\n");
return;
}
+
+ /* lock access to the device data structure */
+ spin_lock_irqsave(&(ss->dev_spinlock), flags);
/* lock access to the device data structure */
spin_lock_irqsave(&(ss->dev_spinlock), flags);
......@@ -481,7 +481,7 @@ static void attempt_merge(request_queue_t * q,
elevator_merge_requests(&q->elevator, req, next);
req->bhtail->b_reqnext = next->bh;
req->bhtail = next->bhtail;
req->nr_sectors += next->nr_sectors;
req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
next->rq_status = RQ_INACTIVE;
list_del(&next->queue);
wake_up (&wait_for_request);
......@@ -685,7 +685,7 @@ static inline void __make_request(request_queue_t * q, int rw,
break;
req->bhtail->b_reqnext = bh;
req->bhtail = bh;
req->nr_sectors += count;
req->nr_sectors = req->hard_nr_sectors += count;
drive_stat_acct(req, count, 0);
elevator_merge_after(elevator, req, latency);
......@@ -714,8 +714,8 @@ static inline void __make_request(request_queue_t * q, int rw,
req->bh = bh;
req->buffer = bh->b_data;
req->current_nr_sectors = count;
req->sector = sector;
req->nr_sectors += count;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += count;
drive_stat_acct(req, count, 0);
elevator_merge_before(elevator, req, latency);
......@@ -754,8 +754,8 @@ static inline void __make_request(request_queue_t * q, int rw,
/* fill up the request-info, and add it to the queue */
req->cmd = rw;
req->errors = 0;
req->sector = sector;
req->nr_sectors = count;
req->hard_sector = req->sector = sector;
req->hard_nr_sectors = req->nr_sectors = count;
req->current_nr_sectors = count;
req->nr_segments = 1; /* Always 1 for a new request. */
req->nr_hw_segments = 1; /* Always 1 for a new request. */
......@@ -920,23 +920,21 @@ int end_that_request_first (struct request *req, int uptodate, char *name)
int nsect;
req->errors = 0;
if (!uptodate) {
if (!uptodate)
printk("end_request: I/O error, dev %s (%s), sector %lu\n",
kdevname(req->rq_dev), name, req->sector);
if ((bh = req->bh) != NULL) {
nsect = bh->b_size >> 9;
req->nr_sectors--;
req->nr_sectors &= ~(nsect - 1);
req->sector += nsect;
req->sector &= ~(nsect - 1);
}
}
if ((bh = req->bh) != NULL) {
nsect = bh->b_size >> 9;
req->bh = bh->b_reqnext;
bh->b_reqnext = NULL;
bh->b_end_io(bh, uptodate);
if ((bh = req->bh) != NULL) {
req->hard_sector += nsect;
req->hard_nr_sectors -= nsect;
req->sector = req->hard_sector;
req->nr_sectors = req->hard_nr_sectors;
req->current_nr_sectors = bh->b_size >> 9;
if (req->nr_sectors < req->current_nr_sectors) {
req->nr_sectors = req->current_nr_sectors;
......
......@@ -765,7 +765,7 @@ static void do_pcd_request (request_queue_t * q)
pcd_unit = unit;
}
pcd_sector = CURRENT->sector;
pcd_count = CURRENT->nr_sectors;
pcd_count = CURRENT->current_nr_sectors;
pcd_buf = CURRENT->buffer;
pcd_busy = 1;
ps_set_intr(do_pcd_read,0,0,nice);
......
......@@ -385,9 +385,62 @@ void pd_init_units( void )
}
}
/*
 * pd_new_segment: try to account for one more scatter/gather segment
 * on 'req'.  The segment count is capped at the smaller of the queue's
 * 'max_segments' and the driver-wide 'cluster' limit.  Returns 1 when
 * the extra segment was granted, 0 when the request is already full.
 */
static inline int pd_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
	int limit = max_segments;

	if (limit > cluster)
		limit = cluster;

	if (req->nr_segments >= limit)
		return 0;

	req->nr_segments++;
	q->elevator.nr_segments++;
	return 1;
}
/*
 * pd_back_merge_fn: may buffer 'bh' be appended to the tail of 'req'?
 * A buffer physically contiguous with the request's last buffer extends
 * the current segment for free; otherwise a fresh segment has to be
 * obtained from pd_new_segment().
 */
static int pd_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	struct buffer_head *tail = req->bhtail;

	if (tail->b_data + tail->b_size != bh->b_data)
		return pd_new_segment(q, req, max_segments);

	return 1;	/* contiguous: no new segment needed */
}
/*
 * pd_front_merge_fn: may buffer 'bh' be prepended to 'req'?  If 'bh'
 * ends exactly where the request's first buffer begins they share a
 * segment; otherwise a fresh segment has to be obtained from
 * pd_new_segment().
 */
static int pd_front_merge_fn(request_queue_t *q, struct request *req,
			     struct buffer_head *bh, int max_segments)
{
	struct buffer_head *head = req->bh;

	if (bh->b_data + bh->b_size != head->b_data)
		return pd_new_segment(q, req, max_segments);

	return 1;	/* contiguous: no new segment needed */
}
/*
 * pd_merge_requests_fn: decide whether the adjacent requests 'req' and
 * 'next' may be coalesced into one.  Succeeds (returns 1, updating the
 * segment bookkeeping on 'req' and the elevator) only if the combined
 * segment count fits under min(max_segments, cluster).
 */
static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next, int max_segments)
{
	int segs = req->nr_segments + next->nr_segments;
	int shared = 0;
	int cap = max_segments;

	if (cap > cluster)
		cap = cluster;

	/* Tail of 'req' touching head of 'next' collapses into one segment. */
	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
		segs--;
		shared = 1;
	}

	if (segs > cap)
		return 0;

	q->elevator.nr_segments -= shared;
	req->nr_segments = segs;
	return 1;
}
int pd_init (void)
{ int i;
request_queue_t * q;
if (disable) return -1;
if (devfs_register_blkdev(MAJOR_NR,name,&pd_fops)) {
......@@ -395,7 +448,11 @@ int pd_init (void)
name,major);
return -1;
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
q = BLK_DEFAULT_QUEUE(MAJOR_NR);
blk_init_queue(q, DEVICE_REQUEST);
q->back_merge_fn = pd_back_merge_fn;
q->front_merge_fn = pd_front_merge_fn;
q->merge_requests_fn = pd_merge_requests_fn;
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
pd_gendisk.major = major;
......@@ -865,7 +922,6 @@ static int pd_ready( void )
static void do_pd_request (request_queue_t * q)
{ struct buffer_head * bh;
struct request * req;
int unit;
if (pd_busy) return;
......@@ -876,12 +932,10 @@ static void do_pd_request (request_queue_t * q)
pd_dev = MINOR(CURRENT->rq_dev);
pd_unit = unit = DEVICE_NR(CURRENT->rq_dev);
pd_block = CURRENT->sector;
pd_count = CURRENT->nr_sectors;
pd_run = CURRENT->nr_sectors;
pd_count = CURRENT->current_nr_sectors;
bh = CURRENT->bh;
req = CURRENT;
if (bh->b_reqnext)
printk("%s: OUCH: b_reqnext != NULL\n",PD.name);
if ((pd_dev >= PD_DEVS) ||
((pd_block+pd_count) > pd_hd[pd_dev].nr_sects)) {
......@@ -890,14 +944,6 @@ static void do_pd_request (request_queue_t * q)
}
pd_cmd = CURRENT->cmd;
pd_run = pd_count;
while ((pd_run <= cluster) &&
(req = blkdev_next_request(req)) &&
(pd_block+pd_run == req->sector) &&
(pd_cmd == req->cmd) &&
(pd_dev == MINOR(req->rq_dev)))
pd_run += req->nr_sectors;
pd_poffs = pd_hd[pd_dev].start_sect;
pd_block += pd_poffs;
pd_buf = CURRENT->buffer;
......@@ -932,7 +978,7 @@ static void pd_next_buf( int unit )
printk("%s: OUCH: request list changed unexpectedly\n",
PD.name);
pd_count = CURRENT->nr_sectors;
pd_count = CURRENT->current_nr_sectors;
pd_buf = CURRENT->buffer;
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
......
......@@ -339,9 +339,62 @@ void pf_init_units( void )
}
}
/*
 * Grant one more scatter/gather segment to 'req' if the request is
 * still below min(max_segments, cluster).  Returns 1 on success, 0 when
 * the request is already at its segment limit.
 */
static inline int pf_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
	const int cap = (max_segments > cluster) ? cluster : max_segments;

	if (req->nr_segments < cap) {
		q->elevator.nr_segments++;
		req->nr_segments++;
		return 1;
	}
	return 0;
}
/*
 * pf_back_merge_fn: can 'bh' be appended to 'req' without consuming a
 * new scatter/gather segment?  Contiguity with the tail buffer means
 * yes; otherwise ask pf_new_segment() for an extra segment.
 */
static int pf_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	/* Contiguous with the request's tail buffer? Then it is free. */
	int contiguous = (req->bhtail->b_data + req->bhtail->b_size == bh->b_data);

	return contiguous ? 1 : pf_new_segment(q, req, max_segments);
}
/*
 * pf_front_merge_fn: can 'bh' be prepended to 'req' without consuming a
 * new scatter/gather segment?  Contiguity with the head buffer means
 * yes; otherwise ask pf_new_segment() for an extra segment.
 */
static int pf_front_merge_fn(request_queue_t *q, struct request *req,
			     struct buffer_head *bh, int max_segments)
{
	/* Does 'bh' end exactly where the request's first buffer begins? */
	int contiguous = (bh->b_data + bh->b_size == req->bh->b_data);

	return contiguous ? 1 : pf_new_segment(q, req, max_segments);
}
/*
 * pf_merge_requests_fn: decide whether requests 'req' and 'next' may be
 * merged.  The merge is allowed (return 1) only when their combined
 * segment count — minus one if the requests are physically contiguous —
 * does not exceed min(max_segments, cluster).  On success the segment
 * accounting on 'req' and the elevator is updated.
 */
static int pf_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next, int max_segments)
{
	int cap = (max_segments > cluster) ? cluster : max_segments;
	int shared;
	int segs;

	/* Requests whose data abuts share one segment across the join. */
	shared = (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data);
	segs = req->nr_segments + next->nr_segments - shared;

	if (segs > cap)
		return 0;

	q->elevator.nr_segments -= shared;
	req->nr_segments = segs;
	return 1;
}
int pf_init (void) /* preliminary initialisation */
{ int i;
request_queue_t * q;
if (disable) return -1;
......@@ -355,7 +408,11 @@ int pf_init (void) /* preliminary initialisation */
major);
return -1;
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
q = BLK_DEFAULT_QUEUE(MAJOR_NR);
blk_init_queue(q, DEVICE_REQUEST);
q->back_merge_fn = pf_back_merge_fn;
q->front_merge_fn = pf_front_merge_fn;
q->merge_requests_fn = pf_merge_requests_fn;
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
......@@ -849,7 +906,6 @@ static int pf_ready( void )
static void do_pf_request (request_queue_t * q)
{ struct buffer_head * bh;
struct request * req;
int unit;
if (pf_busy) return;
......@@ -859,12 +915,10 @@ static void do_pf_request (request_queue_t * q)
pf_unit = unit = DEVICE_NR(CURRENT->rq_dev);
pf_block = CURRENT->sector;
pf_count = CURRENT->nr_sectors;
pf_run = CURRENT->nr_sectors;
pf_count = CURRENT->current_nr_sectors;
bh = CURRENT->bh;
req = CURRENT;
if (bh->b_reqnext)
printk("%s: OUCH: b_reqnext != NULL\n",PF.name);
if ((pf_unit >= PF_UNITS) || (pf_block+pf_count > PF.capacity)) {
end_request(0);
......@@ -872,14 +926,6 @@ static void do_pf_request (request_queue_t * q)
}
pf_cmd = CURRENT->cmd;
pf_run = pf_count;
while ((pf_run <= cluster) &&
(req = blkdev_next_request(req)) &&
(pf_block+pf_run == req->sector) &&
(pf_cmd == req->cmd) &&
(pf_unit == DEVICE_NR(req->rq_dev)))
pf_run += req->nr_sectors;
pf_buf = CURRENT->buffer;
pf_retries = 0;
......@@ -912,7 +958,7 @@ static void pf_next_buf( int unit )
printk("%s: OUCH: request list changed unexpectedly\n",
PF.name);
pf_count = CURRENT->nr_sectors;
pf_count = CURRENT->current_nr_sectors;
pf_buf = CURRENT->buffer;
spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
......
......@@ -39,6 +39,7 @@
* 990605 Made changes to code to support Firmware 1.22a, added
* fairly useless proc entry.
* 990610 removed said useless proc code for the merge <alan>
* 000403 Removed last traces of proc code. <davej>
*/
#include <linux/module.h>
......@@ -541,38 +542,6 @@ static void debug_off(void)
mode_debug = 0;
}
/*
 * /proc read handler: report the driver version, the detected board
 * revision and, when the board supports it, a temperature reading taken
 * directly from the I/O port.
 *
 * Classic single-shot get_info contract: format the whole report into
 * 'buffer', then return the slice starting at 'offset', at most
 * 'length' bytes.  'inout' is part of the proc get_info signature but
 * is unused here.
 *
 * Fixes: dropped the unused 'begin' local, and clamp the returned
 * count at zero — previously an 'offset' past the end of the report
 * made 'len' negative and that negative value was returned.
 */
static int pcwd_proc_get_info(char *buffer, char **start, off_t offset,
		int length, int inout)
{
	int len;

	/* Refresh the cached board revision before reporting it. */
	revision = get_revision();

	len = sprintf(buffer, "Version = " WD_VER "\n");

	if (revision == PCWD_REVISION_A)
		len += sprintf(buffer + len, "Revision = A\n");
	else
		len += sprintf(buffer + len, "Revision = C\n");

	if (supports_temp) {
		/* NOTE(review): raw port value assumed to be Celsius — matches
		 * the original code; not verified against hardware docs. */
		unsigned short c = inb(current_readport);

		len += sprintf(buffer + len, "Temp = Yes\n"
			"Current temp = %d (Celsius)\n",
			c);
	} else
		len += sprintf(buffer + len, "Temp = No\n");

	*start = buffer + offset;
	len -= offset;
	if (len < 0)
		len = 0;	/* offset beyond end of report: report EOF */
	if (len > length)
		len = length;
	return len;
}
static struct file_operations pcwd_fops = {
read: pcwd_read,
write: pcwd_write,
......
......@@ -537,7 +537,7 @@ static int pp_release (struct inode * inode, struct file * file)
unsigned int minor = MINOR (inode->i_rdev);
struct pp_struct *pp = file->private_data;
if (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT) {
if (pp->pdev && pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT) {
if (!(pp->flags & PP_CLAIMED)) {
parport_claim_or_block (pp->pdev);
pp->flags |= PP_CLAIMED;
......
......@@ -813,13 +813,17 @@ static void idedisk_setup (ide_drive_t *drive)
(!drive->forced_geom) && drive->bios_sect && drive->bios_head)
drive->bios_cyl = (capacity / drive->bios_sect) / drive->bios_head;
#if 0 /* done instead for entire identify block in arch/ide.h stuff */
/* fix byte-ordering of buffer size field */
id->buf_size = le16_to_cpu(id->buf_size);
#endif
printk (KERN_INFO "%s: %.40s, %ldMB w/%dkB Cache, CHS=%d/%d/%d",
drive->name, id->model,
capacity/2048L, id->buf_size/2,
printk (KERN_INFO "%s: %ld sectors", drive->name, capacity);
/* Give size in megabytes (MB), not mebibytes (MiB). */
/* We compute the exact rounded value, avoiding overflow. */
printk (" (%ld MB)", (capacity - capacity/625 + 974)/1950);
/* Only print cache size when it was specified */
if (id->buf_size)
printk (" w/%dKiB Cache", id->buf_size/2);
printk(", CHS=%d/%d/%d",
drive->bios_cyl, drive->bios_head, drive->bios_sect);
#ifdef CONFIG_BLK_DEV_IDEDMA
if (drive->using_dma)
......
......@@ -1662,6 +1662,7 @@ void ide_init_drive_cmd (struct request *rq)
rq->cmd = IDE_DRIVE_CMD;
rq->sector = 0;
rq->nr_sectors = 0;
rq->nr_segments = 0;
rq->current_nr_sectors = 0;
rq->sem = NULL;
rq->bh = NULL;
......
......@@ -48,14 +48,8 @@
* Aug 8, 1998 acme Initial version.
*/
#ifdef MODULE
#ifdef MODVERSIONS
#include <linux/modversions.h>
#endif
#include <linux/init.h> /* __init */
#include <linux/module.h>
#else
#define EXPORT_SYMBOL(function)
#endif
#include <linux/kernel.h> /* printk(), and other useful stuff */
#include <linux/stddef.h> /* offsetof(), etc. */
#include <linux/errno.h> /* return codes */
......@@ -66,12 +60,10 @@
#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
#define MOD_VERSION 0
#define MOD_RELEASE 5
#define MOD_RELEASE 6
#ifdef MODULE
MODULE_AUTHOR("Arnaldo Carvalho de Melo");
MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
#endif
/* Function Prototypes */
/* Module entry points. These are called by the OS and must be public. */
......@@ -129,20 +121,21 @@ static u32 cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
* Return: 0 Ok
* < 0 error.
* Context: process */
#ifdef MODULE
int init_module(void)
/*
 * Driver init entry point (wired up via module_init below): print the
 * driver banner — name, version/release and copyright — and report
 * success.  No hardware is touched here; adapter setup happens later.
 */
int __init cycx_drv_init(void)
{
	printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
		copyright);

	return 0;
}
/* Module 'remove' entry point.
* o release all remaining system resources */
void cleanup_module(void)
/*
 * Driver cleanup entry point (wired up via module_exit below).
 * Intentionally empty: nothing is allocated at init time, so there is
 * nothing to release on unload.
 */
void cycx_drv_cleanup(void)
{
}
#endif
/* Kernel APIs */
/* Set up adapter.
* o detect adapter type
......@@ -599,4 +592,8 @@ static u16 checksum(u8 *buf, u32 len)
return crc;
}
module_init(cycx_drv_init);
module_exit(cycx_drv_cleanup);
/* End */
......@@ -13,6 +13,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
* 2000/04/02 acme dprintk and cycx_debug
* module_init/module_exit
* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count
* and cyclomx_close to cyclomx_mod_dec_use_count
* 2000/01/08 acme cleanup
......@@ -43,15 +45,21 @@
#include <asm/uaccess.h> /* kernel <-> user copy */
#include <linux/init.h> /* __init (when not using as a module) */
/* Debug */
unsigned int cycx_debug = 0;
#ifdef MODULE
MODULE_AUTHOR("Arnaldo Carvalho de Melo");
MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "cyclomx debug level");
#endif
/* Defines & Macros */
#define DRV_VERSION 0 /* version number */
#define DRV_RELEASE 6 /* release (minor version) number */
#define DRV_RELEASE 7 /* release (minor version) number */
#define MAX_CARDS 1 /* max number of adapters */
#ifndef CONFIG_CYCLOMX_CARDS /* configurable option */
......@@ -60,10 +68,6 @@ MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
/* Function Prototypes */
/* Module entry points */
int init_module (void);
void cleanup_module (void);
/* WAN link driver entry points */
static int setup (wan_device_t *wandev, wandev_conf_t *conf);
static int shutdown (wan_device_t *wandev);
......@@ -98,11 +102,7 @@ static cycx_t *card_array = NULL; /* adapter data space */
* < 0 error.
* Context: process
*/
#ifdef MODULE
int init_module (void)
#else
int __init cyclomx_init (void)
#endif
{
int cnt, err = 0;
......@@ -156,8 +156,7 @@ int __init cyclomx_init (void)
* o unregister all adapters from the WAN router
* o release all remaining system resources
*/
#ifdef MODULE
void cleanup_module (void)
void cyclomx_cleanup (void)
{
int i = 0;
......@@ -168,7 +167,7 @@ void cleanup_module (void)
kfree(card_array);
}
#endif
/* WAN Device Driver Entry Points */
/*
* Setup/configure WAN link driver.
......@@ -385,4 +384,7 @@ void cyclomx_set_state (cycx_t *card, int state)
spin_unlock_irqrestore(&card->lock, host_cpu_flags);
}
module_init(cyclomx_init);
module_exit(cyclomx_cleanup);
/* End */
......@@ -12,6 +12,10 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
* 2000/04/02 acme dprintk, cycx_debug
* fixed the bug introduced in get_dev_by_lcn and
* get_dev_by_dte_addr by the anonymous hacker
* that converted this driver to softnet
* 2000/01/08 acme cleanup
* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack know
* that we have a X.25 stack implemented in
......@@ -110,7 +114,7 @@ typedef struct x25_channel {
u32 idle_tmout; /* sec, before disconnecting */
struct sk_buff *rx_skb; /* receive socket buffer */
cycx_t *card; /* -> owner */
struct enet_statistics ifstats; /* interface statistics */
struct net_device_stats ifstats;/* interface statistics */
} x25_channel_t;
/* Function Prototypes */
......@@ -178,13 +182,11 @@ static void hex_dump(char *msg, unsigned char *p, int len);
static void x25_dump_config(TX25Config *conf);
static void x25_dump_stats(TX25Stats *stats);
static void x25_dump_devs(wan_device_t *wandev);
#define dprintk(format, a...) printk(format, ##a)
#else
#define hex_dump(msg, p, len)
#define x25_dump_config(conf)
#define x25_dump_stats(stats)
#define x25_dump_devs(wandev)
#define dprintk(format, a...)
#endif
/* Public Functions */
......@@ -846,7 +848,7 @@ static void connect_intr (cycx_t *card, TX25Cmd *cmd)
if (sizerem)
nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
dprintk(KERN_INFO "connect_intr:lcn=%d, local=%s, remote=%s\n",
dprintk(1, KERN_INFO "connect_intr:lcn=%d, local=%s, remote=%s\n",
lcn, loc, rem);
if ((dev = get_dev_by_dte_addr(wandev, rem)) == NULL) {
......@@ -872,7 +874,7 @@ static void connect_confirm_intr (cycx_t *card, TX25Cmd *cmd)
cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
dprintk(KERN_INFO "%s: connect_confirm_intr:lcn=%d, key=%d\n",
dprintk(1, KERN_INFO "%s: connect_confirm_intr:lcn=%d, key=%d\n",
card->devname, lcn, key);
if ((dev = get_dev_by_lcn(wandev, -key)) == NULL) {
......@@ -897,7 +899,7 @@ static void disconnect_confirm_intr (cycx_t *card, TX25Cmd *cmd)
u8 lcn;
cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
dprintk(KERN_INFO "%s: disconnect_confirm_intr:lcn=%d\n",
dprintk(1, KERN_INFO "%s: disconnect_confirm_intr:lcn=%d\n",
card->devname, lcn);
if ((dev = get_dev_by_lcn(wandev, lcn)) == NULL) {
/* Invalid channel, discard packet */
......@@ -917,7 +919,7 @@ static void disconnect_intr (cycx_t *card, TX25Cmd *cmd)
u8 lcn;
cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
dprintk(KERN_INFO "disconnect_intr:lcn=%d\n", lcn);
dprintk(1, KERN_INFO "disconnect_intr:lcn=%d\n", lcn);
if ((dev = get_dev_by_lcn(wandev, lcn)) != NULL) {
x25_channel_t *chan = dev->priv;
......@@ -1172,7 +1174,7 @@ static int x25_place_call (cycx_t *card, x25_channel_t *chan)
key = ffz(card->u.x.connection_keys);
set_bit(key, (void*)&card->u.x.connection_keys);
++key;
dprintk(KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
memset(d, 0, sizeof(d));
d[1] = key; /* user key */
d[2] = 0x10;
......@@ -1259,6 +1261,8 @@ static struct net_device *get_dev_by_lcn (wan_device_t *wandev, s16 lcn)
x25_channel_t *chan;
while (dev) {
chan = (x25_channel_t*)dev->priv;
if (chan->lcn == lcn)
break;
dev = chan->slave;
......@@ -1273,6 +1277,8 @@ static struct net_device *get_dev_by_dte_addr (wan_device_t *wandev, char *dte)
x25_channel_t *chan;
while (dev) {
chan = (x25_channel_t*)dev->priv;
if (!strcmp(chan->addr, dte))
break;
dev = chan->slave;
......@@ -1296,7 +1302,7 @@ static int chan_connect (struct net_device *dev)
if (!chan->addr[0])
return -EINVAL; /* no destination address */
dprintk(KERN_INFO "%s: placing X.25 call to %s...\n",
dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
card->devname, chan->addr);
if (x25_place_call(card, chan))
......
......@@ -103,6 +103,19 @@ static void get_lowlevel_driver (void)
request_module ("parport_lowlevel");
}
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
*
* This can be called by a parallel port device driver in order to
* receive notifications about ports being found in the system, as
* well as ports no longer available.
*
* The @drv structure is allocated by the caller and must not be
* deallocated until after calling parport_unregister_driver().
*
* Returns 0 on success. Currently it always succeeds.
**/
int parport_register_driver (struct parport_driver *drv)
{
struct parport *port;
......@@ -121,6 +134,23 @@ int parport_register_driver (struct parport_driver *drv)
return 0;
}
/**
* parport_unregister_driver - deregister a parallel port device driver
* @arg: structure describing the driver that was given to
* parport_register_driver()
*
* This should be called by a parallel port device driver that has
* registered itself using parport_register_driver() when it is about
* to be unloaded.
*
* When it returns, the driver's attach() routine will no longer be
* called, and for each port that attach() was called for, the
 * detach() routine will have been called.
*
* If the caller's attach() function can block, it is their
* responsibility to make sure to wait for it to exit before
* unloading.
**/
void parport_unregister_driver (struct parport_driver *arg)
{
struct parport_driver *drv = driver_chain, *olddrv = NULL;
......@@ -149,8 +179,17 @@ void parport_unregister_driver (struct parport_driver *arg)
}
}
/* Return a list of all the ports we know about. This function shouldn't
* really be used -- use parport_register_driver instead. */
/**
* parport_enumerate - return a list of the system's parallel ports
*
* This returns the head of the list of parallel ports in the system.
* The structure that is returned describes the first port in the
* list, and its 'next' member points to the next port, or %NULL if
* it's the last port.
*
* If there are no parallel ports in the system, parport_enumerate()
* will return %NULL.
**/
struct parport *parport_enumerate(void)
{
if (!portlist)
......@@ -159,6 +198,33 @@ struct parport *parport_enumerate(void)
return portlist;
}
/**
* parport_register_port - register a parallel port
* @base: base I/O address
* @irq: IRQ line
* @dma: DMA channel
* @ops: pointer to the port driver's port operations structure
*
* When a parallel port (lowlevel) driver finds a port that should be
* made available to parallel port device drivers, it should call
* parport_register_port(). The @base, @irq, and @dma parameters are
* for the convenience of port drivers, and for ports where they
* aren't meaningful needn't be set to anything special. They can be
* altered afterwards by adjusting the relevant members of the parport
* structure that is returned and represents the port. They should
* not be tampered with after calling parport_announce_port, however.
*
* If there are parallel port device drivers in the system that have
* registered themselves using parport_register_driver(), they are not
* told about the port at this time; that is done by
* parport_announce_port().
*
* The @ops structure is allocated by the caller, and must not be
* deallocated before calling parport_unregister_port().
*
* If there is no memory to allocate a new parport structure, this
* function will return %NULL.
**/
struct parport *parport_register_port(unsigned long base, int irq, int dma,
struct parport_operations *ops)
{
......@@ -243,6 +309,17 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
return tmp;
}
/**
* parport_announce_port - tell device drivers about a parallel port
* @port: parallel port to announce
*
* After a port driver has registered a parallel port with
* parport_register_port, and performed any necessary initialisation
* or adjustments, it should call parport_announce_port() in order to
* notify all device drivers that have called
* parport_register_driver(). Their attach() functions will be
* called, with @port as the parameter.
**/
void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
......@@ -286,6 +363,23 @@ static void free_port (struct parport *port)
kfree(port);
}
/**
* parport_unregister_port - deregister a parallel port
* @port: parallel port to deregister
*
* When a parallel port driver is forcibly unloaded, or a parallel
* port becomes inaccessible, the port driver must call this function
* in order to deal with device drivers that still want to use it.
*
* The parport structure associated with the port has its operations
* structure replaced with one containing 'null' operations that
* return errors or just don't do anything.
*
* Any drivers that have registered themselves using
* parport_register_driver() are notified that the port is no longer
* accessible by having their detach() routines called with @port as
* the parameter.
**/
void parport_unregister_port(struct parport *port)
{
struct parport *p;
......@@ -320,7 +414,73 @@ void parport_unregister_port(struct parport *port)
free_port (port);
}
struct pardevice *parport_register_device(struct parport *port, const char *name,
/**
* parport_register_device - register a device on a parallel port
* @port: port to which the device is attached
* @name: a name to refer to the device
* @pf: preemption callback
* @kf: kick callback (wake-up)
* @irq_func: interrupt handler
* @flags: registration flags
* @handle: data for callback functions
*
* This function, called by parallel port device drivers, declares
* that a device is connected to a port, and tells the system all it
* needs to know.
*
* The @name is allocated by the caller and must not be deallocated
* until the caller calls @parport_unregister_device for that device.
*
* The preemption callback function, @pf, is called when this device
* driver has claimed access to the port but another device driver
* wants to use it. It is given @handle as its parameter, and should
* return zero if it is willing for the system to release the port to
* another driver on its behalf. If it wants to keep control of the
* port it should return non-zero, and no action will be taken. It is
* good manners for the driver to try to release the port at the
* earliest opportunity after its preemption callback rejects a
* preemption attempt. Note that if a preemption callback is happy
* for preemption to go ahead, there is no need to release the port;
* it is done automatically. This function may not block, as it may
* be called from interrupt context. If the device driver does not
* support preemption, @pf can be %NULL.
*
* The wake-up ("kick") callback function, @kf, is called when the
* port is available to be claimed for exclusive access; that is,
* parport_claim() is guaranteed to succeed when called from inside
* the wake-up callback function. If the driver wants to claim the
* port it should do so; otherwise, it need not take any action. This
* function may not block, as it may be called from interrupt context.
* If the device driver does not want to be explicitly invited to
* claim the port in this way, @kf can be %NULL.
*
* The interrupt handler, @irq_func, is called when an interrupt
* arrives from the parallel port. Note that if a device driver wants
* to use interrupts it should use parport_enable_irq(), and can also
* check the irq member of the parport structure representing the
* port.
*
* The parallel port (lowlevel) driver is the one that has called
* request_irq() and whose interrupt handler is called first. This
* handler does whatever needs to be done to the hardware to
* acknowledge the interrupt (for PC-style ports there is nothing
* special to be done). It then tells the IEEE 1284 code about the
* interrupt, which may involve reacting to an IEEE 1284 event
* depending on the current IEEE 1284 phase. After this, it calls
* @irq_func. Needless to say, @irq_func will be called from
* interrupt context, and may not block.
*
* The %PARPORT_DEV_EXCL flag is for preventing port sharing, and so
* should only be used when sharing the port with other device drivers
* is impossible and would lead to incorrect behaviour. Use it
* sparingly! Normally, @flags will be zero.
*
* This function returns a pointer to a structure that represents the
* device on the port, or %NULL if there is not enough memory to
* allocate space for that structure.
**/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
int (*pf)(void *), void (*kf)(void *),
void (*irq_func)(int, void *, struct pt_regs *),
int flags, void *handle)
......@@ -420,6 +580,12 @@ struct pardevice *parport_register_device(struct parport *port, const char *name
return NULL;
}
/**
* parport_unregister_device - deregister a device on a parallel port
* @dev: pointer to structure representing device
*
* This undoes the effect of parport_register_device().
**/
void parport_unregister_device(struct pardevice *dev)
{
struct parport *port;
......@@ -466,6 +632,16 @@ void parport_unregister_device(struct pardevice *dev)
free_port (port);
}
/**
* parport_claim - claim access to a parallel port device
* @dev: pointer to structure representing a device on the port
*
* This function will not block and so can be used from interrupt
* context. If parport_claim() succeeds in claiming access to the
* port it returns zero and the port is available to use. It may fail
* (returning non-zero) if the port is in use by another driver and
* that driver is not willing to relinquish control of the port.
**/
int parport_claim(struct pardevice *dev)
{
struct pardevice *oldcad;
......@@ -567,6 +743,15 @@ int parport_claim(struct pardevice *dev)
return -EAGAIN;
}
/**
* parport_claim_or_block - claim access to a parallel port device
* @dev: pointer to structure representing a device on the port
*
* This behaves like parport_claim(), but will block if necessary to
* wait for the port to be free. A return value of 1 indicates that
* it slept; 0 means that it succeeded without needing to sleep. A
* negative error code indicates failure.
**/
int parport_claim_or_block(struct pardevice *dev)
{
int r;
......@@ -609,6 +794,14 @@ int parport_claim_or_block(struct pardevice *dev)
return r;
}
/**
* parport_release - give up access to a parallel port device
* @dev: pointer to structure representing parallel port device
*
* This function cannot fail, but it should not be called without the
* port claimed. Similarly, if the port is already claimed you should
* not try claiming it again.
**/
void parport_release(struct pardevice *dev)
{
struct parport *port = dev->port->physport;
......
......@@ -47,8 +47,7 @@ static int is_tree_busy(struct dentry *root)
}
/* Mountpoints don't count */
if (root->d_mounts != root ||
root->d_covers != root) {
if (d_mountpoint(root)) {
DPRINTK(("is_tree_busy: mountpoint\n"));
count--;
}
......@@ -77,8 +76,7 @@ static int is_tree_busy(struct dentry *root)
count += (dentry->d_count - 1);
/* Mountpoints don't count */
if (dentry->d_mounts != dentry ||
dentry->d_covers != dentry) {
if (d_mountpoint(dentry)) {
DPRINTK(("is_tree_busy: mountpoint\n"));
adj++;
}
......
......@@ -21,7 +21,7 @@ static int autofs4_readlink(struct dentry *dentry, char *buffer, int buflen)
static struct dentry * autofs4_follow_link(struct dentry *dentry,
struct dentry *base,
struct vfsmount *mnt,
struct vfsmount **mnt,
unsigned int flags)
{
struct autofs_info *ino = autofs4_dentry_ino(dentry);
......
......@@ -716,15 +716,16 @@ int remove_inode_dquot_ref(struct inode *inode, short type, struct list_head *to
}
inode->i_flags &= ~S_QUOTA;
put_it:
if (dquot != NODQUOT)
if (dquot != NODQUOT) {
if (dqput_blocks(dquot)) {
if (dquot->dq_count != 1)
printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", dquot->dq_count);
list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */
return 1;
}
else
} else {
dqput(dquot); /* We have guaranteed we won't block */
}
}
return 0;
}
......
......@@ -517,24 +517,22 @@ nlmclnt_unlock_callback(struct rpc_task *task)
if (RPC_ASSASSINATED(task))
goto die;
#if 0
/* FIXME: rpc_restart_call() is broken! */
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
nlm_rebind_host(req->a_host);
rpc_restart_call(task);
return;
goto retry_unlock;
}
#endif
if (status != NLM_LCK_GRANTED
&& status != NLM_LCK_DENIED_GRACE_PERIOD) {
printk("lockd: unexpected unlock status: %d\n", status);
}
die:
rpc_release_task(task);
nlm_release_host(req->a_host);
kfree(req);
return;
retry_unlock:
nlm_rebind_host(req->a_host);
rpc_restart_call(task);
}
/*
......@@ -609,20 +607,14 @@ nlmclnt_cancel_callback(struct rpc_task *task)
}
die:
retry_cancel:
rpc_release_task(task);
nlm_release_host(req->a_host);
kfree(req);
return;
#if 0
/* FIXME: rpc_restart_call() is broken */
retry_cancel:
nlm_rebind_host(req->a_host);
rpc_restart_call(task);
rpc_delay(task, 30 * HZ);
return;
#endif
}
/*
......
......@@ -497,7 +497,6 @@ nlm4svc_callback_exit(struct rpc_task *task)
dprintk("lockd: %4d callback failed (errno = %d)\n",
task->tk_pid, -task->tk_status);
}
rpc_release_task(task);
nlm_release_host(call->a_host);
kfree(call);
}
......
......@@ -579,7 +579,6 @@ nlmsvc_grant_callback(struct rpc_task *task)
block->b_incall = 0;
nlm_release_host(call->a_host);
rpc_release_task(task);
}
/*
......
......@@ -523,7 +523,6 @@ nlmsvc_callback_exit(struct rpc_task *task)
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
rpc_release_task(task);
kfree(call);
}
......
......@@ -299,6 +299,5 @@ nfs_flushd_exit(struct rpc_task *task)
cache->task = NULL;
spin_unlock(&nfs_flushd_lock);
wake_up(&cache->request_wait);
rpc_release_task(task);
}
......@@ -168,7 +168,6 @@ nfs_readpage_result(struct rpc_task *task)
UnlockPage(page);
page_cache_release(page);
rpc_release_task(task);
kfree(req);
}
......
......@@ -161,7 +161,6 @@ static __inline__ void nfs_writedata_free(struct nfs_write_data *p)
static void nfs_writedata_release(struct rpc_task *task)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
rpc_release_task(task);
nfs_writedata_free(wdata);
}
......@@ -1159,6 +1158,8 @@ nfs_flush_one(struct list_head *head, struct file *file, int how)
/* Finalize the task. */
rpc_init_task(task, clnt, nfs_writeback_done, flags);
task->tk_calldata = data;
/* Release requests */
task->tk_release = nfs_writedata_release;
#ifdef CONFIG_NFS_V3
msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ? NFS3PROC_WRITE : NFSPROC_WRITE;
......@@ -1307,7 +1308,6 @@ nfs_writeback_done(struct rpc_task *task)
next:
nfs_unlock_request(req);
}
nfs_writedata_release(task);
}
......@@ -1388,6 +1388,8 @@ nfs_commit_list(struct list_head *head, int how)
rpc_init_task(task, clnt, nfs_commit_done, flags);
task->tk_calldata = data;
/* Release requests */
task->tk_release = nfs_writedata_release;
msg.rpc_proc = NFS3PROC_COMMIT;
msg.rpc_argp = &data->args;
......@@ -1456,7 +1458,6 @@ nfs_commit_done(struct rpc_task *task)
next:
nfs_unlock_request(req);
}
nfs_writedata_release(task);
}
#endif
......
......@@ -343,7 +343,7 @@ asmlinkage long sys_chdir(const char * filename)
if (IS_ERR(name))
goto out;
dentry = lookup_dentry(name, NULL, 0);
dentry = lookup_dentry(name, NULL, LOOKUP_FOLLOW);
putname(name);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
......@@ -432,7 +432,7 @@ asmlinkage long sys_chroot(const char * filename)
if (IS_ERR(name))
goto out;
dentry = lookup_dentry(name, NULL, 0);
dentry = lookup_dentry(name, NULL, LOOKUP_FOLLOW);
putname(name);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
......@@ -703,7 +703,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
if (f->f_mode & FMODE_WRITE) {
error = get_write_access(inode);
if (error)
goto cleanup_dentry;
goto cleanup_file;
}
f->f_dentry = dentry;
......@@ -727,10 +727,11 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
put_write_access(inode);
f->f_dentry = NULL;
f->f_vfsmnt = NULL;
cleanup_file:
put_filp(f);
cleanup_dentry:
dput(dentry);
mntput(mnt);
put_filp(f);
return ERR_PTR(error);
}
......
......@@ -32,6 +32,7 @@ struct request {
int errors;
unsigned long sector;
unsigned long nr_sectors;
unsigned long hard_sector, hard_nr_sectors;
unsigned int nr_segments;
unsigned int nr_hw_segments;
unsigned long current_nr_sectors;
......
......@@ -12,6 +12,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
* 2000/04/02 acme dprintk and cycx_debug
* 1999/01/03 acme judicious use of data types
* 1999/01/02 acme #define X25_ACK_N3 0x4411
* 1998/12/28 acme cleanup: lot'o'things removed
......@@ -30,6 +31,11 @@
#define X25_MBOX_OFFS 0x300 /* general mailbox block */
#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
/* Debug */
#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a)
extern unsigned int cycx_debug;
/* Data Structures */
/* X.25 Command Block. */
typedef struct X25Cmd
......
......@@ -1015,7 +1015,7 @@ extern struct dentry * lookup_dentry(const char *, struct dentry *, unsigned int
extern struct dentry * lookup_one(const char *, struct dentry *);
extern struct dentry * __namei(const char *, unsigned int);
#define namei(pathname) __namei(pathname, 1)
#define namei(pathname) __namei(pathname, LOOKUP_FOLLOW)
#define lnamei(pathname) __namei(pathname, 0)
extern void iput(struct inode *);
......
......@@ -180,7 +180,7 @@ typedef struct page {
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
#define Page_Dirty(page) test_bit(PG_dirty, &(page)->flags)
#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
#define LockPage(page) set_bit(PG_locked, &(page)->flags)
......@@ -207,6 +207,9 @@ typedef struct page {
#define PageClearSwapCache(page) clear_bit(PG_swap_cache, &(page)->flags)
#define PageTestandClearSwapCache(page) test_and_clear_bit(PG_swap_cache, &(page)->flags)
#define PageSwapEntry(page) test_bit(PG_swap_entry, &(page)->flags)
#define SetPageSwapEntry(page) set_bit(PG_swap_entry, &(page)->flags)
#define ClearPageSwapEntry(page) clear_bit(PG_swap_entry, &(page)->flags)
#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
......
......@@ -63,6 +63,7 @@ struct rpc_task {
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
void (*tk_exit)(struct rpc_task *);
void (*tk_release)(struct rpc_task *);
void * tk_calldata;
/*
......
......@@ -305,7 +305,7 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
/* is it a page-cache page? */
if (page->mapping) {
if (!Page_Dirty(page) && !pgcache_under_min()) {
if (!PageDirty(page) && !pgcache_under_min()) {
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
page->mapping = NULL;
......@@ -467,7 +467,7 @@ static inline void __add_to_page_cache(struct page * page,
struct page *alias;
unsigned long flags;
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
get_page(page);
page->index = offset;
......
......@@ -1053,7 +1053,7 @@ static int do_swap_page(struct task_struct * tsk,
pte = mk_pte(page, vma->vm_page_prot);
set_bit(PG_swap_entry, &page->flags);
SetPageSwapEntry(page);
/*
* Freeze the "shared"ness of the page, ie page_count + swap_count.
......
......@@ -108,6 +108,8 @@ void __free_pages_ok (struct page *page, unsigned long order)
BUG();
if (PageLocked(page))
BUG();
if (PageDecrAfter(page))
BUG();
zone = page->zone;
......
......@@ -88,6 +88,9 @@ void __delete_from_swap_cache(struct page *page)
*/
void delete_from_swap_cache_nolock(struct page *page)
{
if (!PageLocked(page))
BUG();
if (block_flushpage(page, 0))
lru_cache_del(page);
......@@ -123,7 +126,7 @@ void free_page_and_swap_cache(struct page *page)
UnlockPage(page);
}
clear_bit(PG_swap_entry, &page->flags);
ClearPageSwapEntry(page);
__free_page(page);
}
......
......@@ -207,7 +207,7 @@ swp_entry_t acquire_swap_entry(struct page *page)
unsigned long offset, type;
swp_entry_t entry;
if (!test_bit(PG_swap_entry, &page->flags))
if (!PageSwapEntry(page))
goto new_swap_entry;
/* We have the old entry in the page offset still */
......
......@@ -201,7 +201,6 @@ rpc_release_client(struct rpc_clnt *clnt)
static void
rpc_default_callback(struct rpc_task *task)
{
rpc_release_task(task);
}
/*
......@@ -266,9 +265,10 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
/* Set up the call info struct and execute the task */
if (task->tk_status == 0)
status = rpc_execute(task);
else
else {
status = task->tk_status;
rpc_release_task(task);
}
rpc_clnt_sigunmask(clnt, &oldset);
......@@ -347,10 +347,9 @@ rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
void
rpc_restart_call(struct rpc_task *task)
{
if (task->tk_flags & RPC_TASK_KILLED) {
rpc_release_task(task);
if (RPC_ASSASSINATED(task))
return;
}
task->tk_action = call_reserve;
rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}
......
......@@ -508,6 +508,7 @@ __rpc_execute(struct rpc_task *task)
return 0;
}
restarted:
while (1) {
/*
* Execute any pending callback.
......@@ -586,10 +587,29 @@ __rpc_execute(struct rpc_task *task)
}
}
if (task->tk_exit) {
task->tk_exit(task);
/* If tk_action is non-null, the user wants us to restart */
if (task->tk_action) {
if (!RPC_ASSASSINATED(task)) {
/* Release RPC slot and buffer memory */
if (task->tk_rqstp)
xprt_release(task);
if (task->tk_buffer) {
rpc_free(task->tk_buffer);
task->tk_buffer = NULL;
}
goto restarted;
}
printk(KERN_ERR "RPC: dead task tries to walk away.\n");
}
}
dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
status = task->tk_status;
if (task->tk_exit)
task->tk_exit(task);
/* Release all resources associated with the task */
rpc_release_task(task);
return status;
}
......@@ -599,22 +619,32 @@ __rpc_execute(struct rpc_task *task)
*
* This may be called recursively if e.g. an async NFS task updates
* the attributes and finds that dirty pages must be flushed.
* NOTE: Upon exit of this function the task is guaranteed to be
* released. In particular note that tk_release() will have
* been called, so your task memory may have been freed.
*/
int
rpc_execute(struct rpc_task *task)
{
int status = -EIO;
if (rpc_inhibit) {
printk(KERN_INFO "RPC: execution inhibited!\n");
return -EIO;
goto out_release;
}
task->tk_flags |= RPC_TASK_RUNNING;
status = -EWOULDBLOCK;
if (task->tk_active) {
printk(KERN_ERR "RPC: active task was run twice!\n");
return -EWOULDBLOCK;
goto out_err;
}
task->tk_active = 1;
task->tk_active = 1;
task->tk_flags |= RPC_TASK_RUNNING;
return __rpc_execute(task);
out_release:
rpc_release_task(task);
out_err:
return status;
}
/*
......@@ -758,6 +788,13 @@ rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
current->pid);
}
static void
rpc_default_free_task(struct rpc_task *task)
{
dprintk("RPC: %4d freeing task\n", task->tk_pid);
rpc_free(task);
}
/*
* Create a new task for the specified client. We have to
* clean up after an allocation failure, as the client may
......@@ -774,6 +811,9 @@ rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
rpc_init_task(task, clnt, callback, flags);
/* Replace tk_release */
task->tk_release = rpc_default_free_task;
dprintk("RPC: %4d allocated task\n", task->tk_pid);
task->tk_flags |= RPC_TASK_DYNAMIC;
out:
......@@ -849,12 +889,8 @@ rpc_release_task(struct rpc_task *task)
#ifdef RPC_DEBUG
task->tk_magic = 0;
#endif
if (task->tk_flags & RPC_TASK_DYNAMIC) {
dprintk("RPC: %4d freeing task\n", task->tk_pid);
task->tk_flags &= ~RPC_TASK_DYNAMIC;
rpc_free(task);
}
if (task->tk_release)
task->tk_release(task);
}
/*
......@@ -886,7 +922,6 @@ rpc_child_exit(struct rpc_task *child)
__rpc_wake_up(parent);
}
spin_unlock_bh(&rpc_queue_lock);
rpc_release_task(child);
}
/*
......
......@@ -27,7 +27,6 @@ EXPORT_SYMBOL(rpc_allocate);
EXPORT_SYMBOL(rpc_free);
EXPORT_SYMBOL(rpc_execute);
EXPORT_SYMBOL(rpc_init_task);
EXPORT_SYMBOL(rpc_release_task);
EXPORT_SYMBOL(rpc_sleep_on);
EXPORT_SYMBOL(rpc_wake_up_next);
EXPORT_SYMBOL(rpc_wake_up_task);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment