Commit 588a2e29 authored by Linus Torvalds

Import 2.3.22pre3

parent a7f16fab
......@@ -12581,8 +12581,7 @@ CONFIG_PPDEV
Saying Y to this adds support for /dev/parport device nodes. This
is needed for programs that want portable access to the parallel
port, for instance deviceid (which displays Plug-and-Play device
IDs) and vlp (which makes a Linux computer act as though it's a
printer).
IDs).
This is the parallel port equivalent of SCSI generic support (sg).
It is safe to say N to this -- it is not needed for normal printing
......
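The help text above describes the user-space side of CONFIG_PPDEV. A minimal sketch of how a program might drive the data lines through a /dev/parport node, assuming the PPCLAIM/PPWDATA/PPRELEASE ioctls from <linux/ppdev.h>; the node name and error handling are illustrative:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ppdev.h>

    /* Write one byte to the parallel port data lines via ppdev. */
    int write_one_byte(unsigned char b)
    {
            int fd = open("/dev/parport0", O_RDWR);
            if (fd < 0)
                    return -1;
            if (ioctl(fd, PPCLAIM) < 0) {   /* claim exclusive access */
                    close(fd);
                    return -1;
            }
            ioctl(fd, PPWDATA, &b);         /* drive the data lines */
            ioctl(fd, PPRELEASE);           /* release for other users */
            close(fd);
            return 0;
    }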
......@@ -278,6 +278,9 @@ int parport_negotiate (struct parport *port, int mode)
return -ENOSYS; /* FIXME (implement BECP) */
}
if (mode & IEEE1284_EXT_LINK)
m = 1<<7; /* request extensibility link */
port->ieee1284.phase = IEEE1284_PH_NEGOTIATION;
/* Start off with nStrobe and nAutoFd high, and nSelectIn low */
......@@ -354,12 +357,59 @@ int parport_negotiate (struct parport *port, int mode)
return 1;
}
/* More to do if we've requested extensibility link. */
if (mode & IEEE1284_EXT_LINK) {
m = mode & 0x7f;
udelay (1);
parport_write_data (port, m);
udelay (1);
/* Event 51: Set nStrobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
/* Event 53: Set nStrobe high */
udelay (5);
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
0);
/* Event 55: nAck goes high */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant
* device. */
DPRINTK (KERN_DEBUG
"%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode,
port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
}
/* Event 54: Peripheral sets XFlag to reflect support */
xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
/* xflag should be high. */
if (!xflag) {
/* Extended mode not supported. */
DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
"supported\n", port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
/* Any further setup is left to the caller. */
}
/* Mode is supported */
DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
port->ieee1284.mode = mode;
/* But ECP is special */
if (mode & IEEE1284_MODE_ECP) {
if (!(mode & IEEE1284_EXT_LINK) && (mode & IEEE1284_MODE_ECP)) {
port->ieee1284.phase = IEEE1284_PH_ECP_SETUP;
/* Event 30: Set nAutoFd low */
......
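The hunk above adds the host side of IEEE 1284 extensibility-link negotiation: bit 7 of the extensibility byte requests the link, the low seven bits of the mode word are then placed on the data lines and latched with an nStrobe pulse (events 51 and 53), and nAck plus XFlag (events 55 and 54) report whether the peripheral accepted it. A hedged sketch of the caller side, with a made-up link id; treating 0 as the success return is an assumption from parport convention, not something this diff shows:

    /* Request extensibility link 1 (illustrative id, bits 0-6). */
    if (parport_negotiate (port, IEEE1284_EXT_LINK | 0x01) == 0) {
            /* Link granted; as the comment above says, any further
             * setup is left to the caller. */
    }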
......@@ -1708,58 +1708,83 @@ static int __init parport_pc_init_pci (int irq, int dma)
struct {
unsigned int vendor;
unsigned int device;
unsigned int subvendor;
unsigned int subdevice;
unsigned int numports;
struct {
unsigned long lo;
unsigned long hi; /* -ve if not there */
} addr[4];
} cards[] = {
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_550, 1,
{ { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_650, 1,
{ { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_850, 1,
{ { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_10x, 1,
{ { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_10x, 2,
{ { 2, 3 }, { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_550, 1,
{ { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_650, 1,
{ { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_850, 1,
{ { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_20x, 1,
{ { 0, 1 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_20x, 2,
{ { 0, 1 }, { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_550, 2,
{ { 1, 2 }, { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_650, 2,
{ { 1, 2 }, { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_850, 2,
{ { 1, 2 }, { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_550, 1,
{ { 1, 2 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_650, 1,
{ { 1, 2 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_850, 1,
{ { 1, 2 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_550, 1,
{ { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_650, 1,
{ { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_850, 1,
{ { 2, 3 }, } },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PARALLEL, 1,
{ { 0, -1 }, } },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_A, 1,
{ { 0, -1 }, } },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_B, 1,
{ { 0, -1 }, } },
{ PCI_VENDOR_ID_EXSYS, PCI_DEVICE_ID_EXSYS_4014, 2,
{ { 2, -1 }, { 3, -1 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_550,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_650,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_850,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_10x,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_10x,
PCI_ANY_ID, PCI_ANY_ID,
2, { { 2, 3 }, { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_550,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_650,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_850,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 4, 5 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_20x,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 0, 1 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_20x,
PCI_ANY_ID, PCI_ANY_ID,
2, { { 0, 1 }, { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_550,
PCI_ANY_ID, PCI_ANY_ID,
2, { { 1, 2 }, { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_650,
PCI_ANY_ID, PCI_ANY_ID,
2, { { 1, 2 }, { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_850,
PCI_ANY_ID, PCI_ANY_ID,
2, { { 1, 2 }, { 3, 4 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_550,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 1, 2 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_650,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 1, 2 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_850,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 1, 2 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_550,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_650,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 2, 3 }, } },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_850,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 2, 3 }, } },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PARALLEL,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 0, -1 }, } },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_A,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 0, -1 }, } },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_B,
PCI_ANY_ID, PCI_ANY_ID,
1, { { 0, -1 }, } },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014,
2, { { 4, -1 }, { 5, -1 }, } },
{ 0, }
};
......@@ -1776,6 +1801,15 @@ static int __init parport_pc_init_pci (int irq, int dma)
cards[i].device,
pcidev)) != NULL) {
int n;
if (cards[i].subvendor != PCI_ANY_ID &&
cards[i].subvendor != pcidev->subsystem_vendor)
continue;
if (cards[i].subdevice != PCI_ANY_ID &&
cards[i].subdevice != pcidev->subsystem_device)
continue;
for (n = 0; n < cards[i].numports; n++) {
unsigned long lo = cards[i].addr[n].lo;
unsigned long hi = cards[i].addr[n].hi;
......
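The table now carries subvendor/subdevice fields so that boards built around generic PCI bridge chips can be told apart: the EX-4014, previously listed under its own vendor ID, is re-keyed as a PLX 9050 bridge carrying Exsys subsystem IDs, while every other entry wildcards the new fields with PCI_ANY_ID. A hedged restatement of the match rule the loop applies (the helper name is invented):

    /* An entry matches when each subsystem field is either a
     * wildcard or equal to the device's subsystem ID. */
    static int subsystem_matches(const struct pci_dev *dev,
                                 unsigned int subvendor,
                                 unsigned int subdevice)
    {
            return (subvendor == PCI_ANY_ID ||
                    subvendor == dev->subsystem_vendor) &&
                   (subdevice == PCI_ANY_ID ||
                    subdevice == dev->subsystem_device);
    }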
......@@ -994,10 +994,6 @@ VENDOR( ATRONICS, "Atronics" )
DEVICE( ATRONICS, ATRONICS_2015, "IDE-2015PL")
ENDVENDOR()
VENDOR( EXSYS, "Exsys" )
DEVICE( EXSYS, EXSYS_4014, "EX-4014")
ENDVENDOR()
VENDOR( TIGERJET, "TigerJet" )
DEVICE( TIGERJET, TIGERJET_300, "Tiger300 ISDN")
ENDVENDOR()
......
......@@ -34,7 +34,7 @@ printk_name(const char *name, int len)
len = 80;
strncpy(buf, name, len);
buf[len] = 0;
printk(buf);
printk("%s", buf);
}
#endif
......
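The one-line change above closes a format-string hole: buf is built from a caller-supplied name, and passing it as printk's format means any '%' in that name is parsed as a conversion specification, making printk read nonexistent varargs. A hedged illustration (the name is invented):

    /* If name contained "vol_50%s", the old call would read a
     * bogus string argument off the stack: */
    printk(buf);            /* unsafe: buf parsed as a format */
    printk("%s", buf);      /* safe: buf treated as plain data */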
......@@ -71,6 +71,10 @@ typedef enum {
#define IEEE1284_MODE_EPPSL (1<<11) /* EPP 1.7 */
#define IEEE1284_MODE_EPPSWE (1<<12) /* Software-emulated */
#define IEEE1284_DEVICEID (1<<2) /* This is a flag */
#define IEEE1284_EXT_LINK (1<<14) /* This flag causes the
* extensibility link to
* be requested, using
* bits 0-6. */
/* For the benefit of parport_read/write, you can use these with
* parport_negotiate to use address operations. They have no effect
......
......@@ -1076,8 +1076,8 @@
#define PCI_VENDOR_ID_HOLTEK 0x9412
#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
#define PCI_VENDOR_ID_EXSYS 0xd84d
#define PCI_DEVICE_ID_EXSYS_4014 0x4014
#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
#define PCI_VENDOR_ID_TIGERJET 0xe159
#define PCI_DEVICE_ID_TIGERJET_300 0x0001
......
......@@ -699,6 +699,8 @@ extern void exit_fs(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void daemonize(void);
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
......
......@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/sunrpc/clnt.h>
#include <linux/spinlock.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
......@@ -71,6 +72,8 @@ rpcauth_destroy(struct rpc_auth *auth)
auth->au_ops->destroy(auth);
}
spinlock_t rpc_credcache_lock = SPIN_LOCK_UNLOCKED;
/*
* Initialize RPC credential cache
*/
......@@ -94,6 +97,7 @@ rpcauth_free_credcache(struct rpc_auth *auth)
if (!(destroy = auth->au_ops->crdestroy))
destroy = (void (*)(struct rpc_cred *)) rpc_free;
spin_lock(&rpc_credcache_lock);
for (i = 0; i < RPC_CREDCACHE_NR; i++) {
q = &auth->au_credcache[i];
while ((cred = *q) != NULL) {
......@@ -101,6 +105,7 @@ rpcauth_free_credcache(struct rpc_auth *auth)
destroy(cred);
}
}
spin_unlock(&rpc_credcache_lock);
}
/*
......@@ -113,6 +118,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth)
int i, safe = 0;
dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth);
spin_lock(&rpc_credcache_lock);
for (i = 0; i < RPC_CREDCACHE_NR; i++) {
q = &auth->au_credcache[i];
while ((cred = *q) != NULL) {
......@@ -129,6 +135,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth)
q = &cred->cr_next;
}
}
spin_unlock(&rpc_credcache_lock);
while ((cred = free) != NULL) {
free = cred->cr_next;
rpc_free(cred);
......@@ -145,10 +152,12 @@ rpcauth_insert_credcache(struct rpc_auth *auth, struct rpc_cred *cred)
int nr;
nr = (cred->cr_uid % RPC_CREDCACHE_NR);
spin_lock(&rpc_credcache_lock);
cred->cr_next = auth->au_credcache[nr];
auth->au_credcache[nr] = cred;
cred->cr_expire = jiffies + auth->au_expire;
cred->cr_count++;
spin_unlock(&rpc_credcache_lock);
}
/*
......@@ -166,6 +175,7 @@ rpcauth_lookup_credcache(struct rpc_task *task)
if (time_before(auth->au_nextgc, jiffies))
rpcauth_gc_credcache(auth);
spin_lock(&rpc_credcache_lock);
q = &auth->au_credcache[nr];
while ((cred = *q) != NULL) {
if (auth->au_ops->crmatch(task, cred)) {
......@@ -174,6 +184,7 @@ rpcauth_lookup_credcache(struct rpc_task *task)
}
q = &cred->cr_next;
}
spin_unlock(&rpc_credcache_lock);
if (!cred)
cred = auth->au_ops->crcreate(task);
......@@ -194,6 +205,7 @@ rpcauth_remove_credcache(struct rpc_auth *auth, struct rpc_cred *cred)
int nr;
nr = (cred->cr_uid % RPC_CREDCACHE_NR);
spin_lock(&rpc_credcache_lock);
q = &auth->au_credcache[nr];
while ((cr = *q) != NULL) {
if (cred == cr) {
......@@ -202,6 +214,7 @@ rpcauth_remove_credcache(struct rpc_auth *auth, struct rpc_cred *cred)
}
q = &cred->cr_next;
}
spin_unlock(&rpc_credcache_lock);
}
struct rpc_cred *
......
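Every credential-cache hunk above now brackets the au_credcache hash chains with the new rpc_credcache_lock, and each walker keeps the existing pointer-to-pointer idiom so an entry can be unlinked without tracking a separate previous pointer. A hedged distillation of the locked walk, where should_remove() and destroy() stand in for the per-path conditions:

    spin_lock(&rpc_credcache_lock);
    q = &auth->au_credcache[nr];
    while ((cred = *q) != NULL) {
            if (should_remove(cred)) {
                    *q = cred->cr_next;     /* unlink in place */
                    destroy(cred);
            } else
                    q = &cred->cr_next;     /* step to next link */
    }
    spin_unlock(&rpc_credcache_lock);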
......@@ -18,6 +18,7 @@
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/clnt.h>
......@@ -73,6 +74,16 @@ static int rpc_inhibit = 0;
static u32 swap_buffer[PAGE_SIZE >> 2];
static int swap_buffer_used = 0;
/*
* Spinlock for wait queues. Access to the latter also has to be
* interrupt-safe in order to allow timers to wake up sleeping tasks.
*/
spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
/*
* Spinlock for other critical sections of code.
*/
spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
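The comments above imply a rule followed through the rest of this file: rpc_queue_lock is always taken with the IRQ-saving variants, because timer handlers fire in interrupt context and wake sleeping tasks, while rpc_sched_lock only guards the all_tasks list from process context and can use plain spin_lock(). A hedged micro-example of the two styles:

    unsigned long flags;

    spin_lock_irqsave(&rpc_queue_lock, flags);      /* IRQ-safe */
    /* ... wait-queue work that races with timers ... */
    spin_unlock_irqrestore(&rpc_queue_lock, flags);

    spin_lock(&rpc_sched_lock);                     /* process context only */
    /* ... all_tasks list work ... */
    spin_unlock(&rpc_sched_lock);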
/*
* Add new request to wait queue.
*
......@@ -81,8 +92,8 @@ static int swap_buffer_used = 0;
* improve overall performance.
* Everyone else gets appended to the queue to ensure proper FIFO behavior.
*/
int
rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
static int
__rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (task->tk_rpcwait) {
if (task->tk_rpcwait != queue)
......@@ -104,12 +115,24 @@ rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
return 0;
}
int
rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
{
unsigned long oldflags;
int result;
spin_lock_irqsave(&rpc_queue_lock, oldflags);
result = __rpc_add_wait_queue(q, task);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
return result;
}
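rpc_add_wait_queue keeps its old signature but is now a thin locking wrapper; the double-underscore form assumes the caller already holds rpc_queue_lock, which is why paths that run inside the lock (rpc_make_runnable and __rpc_sleep_on below) call it directly. A hedged sketch of which form belongs where — calling the wrapper with the lock held would self-deadlock on this non-recursive spinlock (someq is illustrative):

    /* No lock held: use the wrapper. */
    rpc_add_wait_queue(&someq, task);

    /* Already under rpc_queue_lock: use the __ helper. */
    spin_lock_irqsave(&rpc_queue_lock, flags);
    __rpc_add_wait_queue(&someq, task);
    spin_unlock_irqrestore(&rpc_queue_lock, flags);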
/*
* Remove request from queue.
* Note: must be called with interrupts disabled.
* Note: must be called with spin lock held.
*/
void
rpc_remove_wait_queue(struct rpc_task *task)
static void
__rpc_remove_wait_queue(struct rpc_task *task)
{
struct rpc_wait_queue *queue;
......@@ -122,6 +145,16 @@ rpc_remove_wait_queue(struct rpc_task *task)
task->tk_pid, queue, rpc_qname(queue));
}
void
rpc_remove_wait_queue(struct rpc_task *task)
{
unsigned long oldflags;
spin_lock_irqsave(&rpc_queue_lock, oldflags);
__rpc_remove_wait_queue(task);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
}
/*
* Set up a timer for the current task.
*/
......@@ -165,7 +198,7 @@ rpc_del_timer(struct rpc_task *task)
* Make an RPC task runnable.
*
* Note: If the task is ASYNC, this must be called with
* interrupts disabled to protect the wait queue operation.
* the spinlock held to protect the wait queue operation.
*/
static inline void
rpc_make_runnable(struct rpc_task *task)
......@@ -177,7 +210,7 @@ rpc_make_runnable(struct rpc_task *task)
task->tk_flags |= RPC_TASK_RUNNING;
if (RPC_IS_ASYNC(task)) {
int status;
status = rpc_add_wait_queue(&schedq, task);
status = __rpc_add_wait_queue(&schedq, task);
if (status)
{
printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
......@@ -214,18 +247,12 @@ static void
__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action, rpc_action timer)
{
unsigned long oldflags;
int status;
dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
rpc_qname(q), jiffies);
/*
* Protect the execution below.
*/
save_flags(oldflags); cli();
status = rpc_add_wait_queue(q, task);
status = __rpc_add_wait_queue(q, task);
if (status)
{
printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
......@@ -240,7 +267,6 @@ __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
task->tk_flags &= ~RPC_TASK_RUNNING;
}
restore_flags(oldflags);
return;
}
......@@ -248,11 +274,17 @@ void
rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action, rpc_action timer)
{
unsigned long oldflags;
/*
* Protect the queue operations.
*/
spin_lock_irqsave(&rpc_queue_lock, oldflags);
__rpc_sleep_on(q, task, action, timer);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
}
/*
* Wake up a single task -- must be invoked with bottom halves off.
* Wake up a single task -- must be invoked with spin lock held.
*
* It would probably suffice to cli/sti the del_timer and remove_wait_queue
* operations individually.
......@@ -272,7 +304,7 @@ __rpc_wake_up(struct rpc_task *task)
#endif
rpc_del_timer(task);
if (task->tk_rpcwait != &schedq)
rpc_remove_wait_queue(task);
__rpc_remove_wait_queue(task);
if (!RPC_IS_RUNNING(task)) {
task->tk_flags |= RPC_TASK_CALLBACK;
rpc_make_runnable(task);
......@@ -289,7 +321,7 @@ __rpc_default_timer(struct rpc_task *task)
dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
task->tk_status = -ETIMEDOUT;
task->tk_timeout = 0;
__rpc_wake_up(task);
rpc_wake_up_task(task);
}
/*
......@@ -300,9 +332,9 @@ rpc_wake_up_task(struct rpc_task *task)
{
unsigned long oldflags;
save_flags(oldflags); cli();
spin_lock_irqsave(&rpc_queue_lock, oldflags);
__rpc_wake_up(task);
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
}
/*
......@@ -315,10 +347,10 @@ rpc_wake_up_next(struct rpc_wait_queue *queue)
struct rpc_task *task;
dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
save_flags(oldflags); cli();
spin_lock_irqsave(&rpc_queue_lock, oldflags);
if ((task = queue->task) != 0)
__rpc_wake_up(task);
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
return task;
}
......@@ -331,10 +363,10 @@ rpc_wake_up(struct rpc_wait_queue *queue)
{
unsigned long oldflags;
save_flags(oldflags); cli();
spin_lock_irqsave(&rpc_queue_lock, oldflags);
while (queue->task)
__rpc_wake_up(queue->task);
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
}
/*
......@@ -346,12 +378,12 @@ rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
struct rpc_task *task;
unsigned long oldflags;
save_flags(oldflags); cli();
spin_lock_irqsave(&rpc_queue_lock, oldflags);
while ((task = queue->task) != NULL) {
task->tk_status = status;
__rpc_wake_up(task);
}
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
}
/*
......@@ -369,7 +401,7 @@ static void
__rpc_atrun(struct rpc_task *task)
{
task->tk_status = 0;
__rpc_wake_up(task);
rpc_wake_up_task(task);
}
/*
......@@ -432,13 +464,13 @@ __rpc_execute(struct rpc_task *task)
* and the RPC reply arrives before we get here, it will
* have state RUNNING, but will still be on schedq.
*/
save_flags(oldflags); cli();
spin_lock_irqsave(&rpc_queue_lock, oldflags);
if (RPC_IS_RUNNING(task)) {
if (task->tk_rpcwait == &schedq)
rpc_remove_wait_queue(task);
__rpc_remove_wait_queue(task);
} else while (!RPC_IS_RUNNING(task)) {
if (RPC_IS_ASYNC(task)) {
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
return 0;
}
......@@ -448,9 +480,9 @@ __rpc_execute(struct rpc_task *task)
if (current->pid == rpciod_pid)
printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
sti();
spin_unlock_irq(&rpc_queue_lock);
__wait_event(task->tk_wait, RPC_IS_RUNNING(task));
cli();
spin_lock_irq(&rpc_queue_lock);
/*
* When the task received a signal, remove from
......@@ -462,7 +494,7 @@ __rpc_execute(struct rpc_task *task)
dprintk("RPC: %4d sync task resuming\n",
task->tk_pid);
}
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
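A spinlock cannot be held across a blocking wait, so the synchronous path above drops rpc_queue_lock around __wait_event() and retakes it before re-testing the task state; the enclosing while loop is what makes the drop safe, since the wake-up condition is rechecked under the lock. A hedged distillation, using the plain _irq variants for brevity:

    spin_lock_irq(&rpc_queue_lock);
    while (!RPC_IS_RUNNING(task)) {
            spin_unlock_irq(&rpc_queue_lock);   /* never sleep under a spinlock */
            __wait_event(task->tk_wait, RPC_IS_RUNNING(task));
            spin_lock_irq(&rpc_queue_lock);     /* recheck under the lock */
    }
    spin_unlock_irq(&rpc_queue_lock);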
/*
* When a sync task receives a signal, it exits with
......@@ -522,15 +554,16 @@ __rpc_schedule(void)
int need_resched = current->need_resched;
dprintk("RPC: rpc_schedule enter\n");
save_flags(oldflags);
while (1) {
cli();
if (!(task = schedq.task))
spin_lock_irqsave(&rpc_queue_lock, oldflags);
if (!(task = schedq.task)) {
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
break;
}
rpc_del_timer(task);
rpc_remove_wait_queue(task);
__rpc_remove_wait_queue(task);
task->tk_flags |= RPC_TASK_RUNNING;
restore_flags(oldflags);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
__rpc_execute(task);
......@@ -541,7 +574,6 @@ __rpc_schedule(void)
if (need_resched)
schedule();
}
restore_flags(oldflags);
dprintk("RPC: rpc_schedule leave\n");
}
......@@ -626,11 +658,13 @@ rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
task->tk_suid_retry = 1;
/* Add to global list of all tasks */
spin_lock(&rpc_sched_lock);
task->tk_next_task = all_tasks;
task->tk_prev_task = NULL;
if (all_tasks)
all_tasks->tk_prev_task = task;
all_tasks = task;
spin_unlock(&rpc_sched_lock);
if (clnt)
clnt->cl_users++;
......@@ -679,10 +713,12 @@ void
rpc_release_task(struct rpc_task *task)
{
struct rpc_task *next, *prev;
unsigned long oldflags;
dprintk("RPC: %4d release task\n", task->tk_pid);
/* Remove from global task list */
spin_lock(&rpc_sched_lock);
prev = task->tk_prev_task;
next = task->tk_next_task;
if (next)
......@@ -691,6 +727,19 @@ rpc_release_task(struct rpc_task *task)
prev->tk_next_task = next;
else
all_tasks = next;
task->tk_next_task = task->tk_prev_task = NULL;
spin_unlock(&rpc_sched_lock);
/* Protect the execution below. */
spin_lock_irqsave(&rpc_queue_lock, oldflags);
/* Delete any running timer */
rpc_del_timer(task);
/* Remove from any wait queue we're still on */
__rpc_remove_wait_queue(task);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
/* Release resources */
if (task->tk_rqstp)
......@@ -738,12 +787,15 @@ rpc_find_parent(struct rpc_task *child)
static void
rpc_child_exit(struct rpc_task *child)
{
unsigned long oldflags;
struct rpc_task *parent;
spin_lock_irqsave(&rpc_queue_lock, oldflags);
if ((parent = rpc_find_parent(child)) != NULL) {
parent->tk_status = child->tk_status;
rpc_wake_up_task(parent);
__rpc_wake_up(parent);
}
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
rpc_release_task(child);
}
......@@ -772,11 +824,11 @@ rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
unsigned long oldflags;
save_flags(oldflags); cli();
rpc_make_runnable(child);
restore_flags(oldflags);
spin_lock_irqsave(&rpc_queue_lock, oldflags);
/* N.B. Is it possible for the child to have already finished? */
rpc_sleep_on(&childq, task, func, NULL);
__rpc_sleep_on(&childq, task, func, NULL);
rpc_make_runnable(child);
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
}
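The reorder above answers the old "N.B." question: previously the child was made runnable before the parent slept on childq, so a fast child could reach rpc_child_exit() and attempt the wake-up while the parent was not yet queued, losing the wake-up forever. Queuing the parent first and holding rpc_queue_lock across both steps closes that window, since rpc_child_exit() now needs the same lock before it can wake the parent.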
/*
......@@ -789,8 +841,11 @@ rpc_killall_tasks(struct rpc_clnt *clnt)
struct rpc_task **q, *rovr;
dprintk("RPC: killing all tasks for client %p\n", clnt);
/* N.B. Why bother to inhibit? Nothing blocks here ... */
rpc_inhibit++;
/*
* Spin lock all_tasks to prevent changes...
*/
spin_lock(&rpc_sched_lock);
for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
if (!clnt || rovr->tk_client == clnt) {
rovr->tk_flags |= RPC_TASK_KILLED;
......@@ -798,7 +853,7 @@ rpc_killall_tasks(struct rpc_clnt *clnt)
rpc_wake_up_task(rovr);
}
}
rpc_inhibit--;
spin_unlock(&rpc_sched_lock);
}
static DECLARE_MUTEX_LOCKED(rpciod_running);
......@@ -989,8 +1044,12 @@ void rpc_show_tasks(void)
struct rpc_task *t = all_tasks, *next;
struct nfs_wreq *wreq;
if (!t)
spin_lock(&rpc_sched_lock);
t = all_tasks;
if (!t) {
spin_unlock(&rpc_sched_lock);
return;
}
printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
"-rpcwait -action- --exit--\n");
for (; t; t = next) {
......@@ -1013,5 +1072,6 @@ void rpc_show_tasks(void)
wreq->wb_file->f_dentry->d_parent->d_name.name,
wreq->wb_file->f_dentry->d_name.name);
}
spin_unlock(&rpc_sched_lock);
}
#endif
......@@ -76,6 +76,9 @@
static struct rpc_xprt * sock_list = NULL;
#endif
/* Spinlock for critical sections in the code. */
spinlock_t xprt_lock = SPIN_LOCK_UNLOCKED;
#ifdef RPC_DEBUG
# undef RPC_DEBUG_DATA
# define RPCDBG_FACILITY RPCDBG_XPRT
......@@ -497,6 +500,7 @@ xprt_reconn_timeout(struct rpc_task *task)
rpc_wake_up_task(task);
}
extern spinlock_t rpc_queue_lock;
/*
* Look up the RPC request corresponding to a reply.
*/
......@@ -505,22 +509,28 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
{
struct rpc_task *head, *task;
struct rpc_rqst *req;
unsigned long oldflags;
int safe = 0;
spin_lock_irqsave(&rpc_queue_lock, oldflags);
if ((head = xprt->pending.task) != NULL) {
task = head;
do {
if ((req = task->tk_rqstp) && req->rq_xid == xid)
return req;
goto out;
task = task->tk_next;
if (++safe > 100) {
printk("xprt_lookup_rqst: loop in Q!\n");
return NULL;
goto out_bad;
}
} while (task != head);
}
dprintk("RPC: unknown XID %08x in reply.\n", xid);
return NULL;
out_bad:
req = NULL;
out:
spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
return req;
}
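With the pending-queue walk now under rpc_queue_lock, every exit from xprt_lookup_rqst() must pass the single unlock site, so the early returns become jumps to out/out_bad. A hedged distillation of the shape, flattening the real circular-list walk (and its loop-detection counter) into a plain loop; names follow the diff but the function is a simplification:

    static struct rpc_rqst *lookup_xid(struct rpc_xprt *xprt, u32 xid)
    {
            struct rpc_rqst *req = NULL;
            struct rpc_task *task;
            unsigned long flags;

            spin_lock_irqsave(&rpc_queue_lock, flags);
            for (task = xprt->pending.task; task; task = task->tk_next)
                    if (task->tk_rqstp && task->tk_rqstp->rq_xid == xid) {
                            req = task->tk_rqstp;
                            break;          /* still falls through the unlock */
                    }
            spin_unlock_irqrestore(&rpc_queue_lock, flags);
            return req;                     /* NULL when no match */
    }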
/*
......@@ -1018,6 +1028,7 @@ xprt_down_transmit(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
start_bh_atomic();
spin_lock(&xprt_lock);
if (xprt->snd_task && xprt->snd_task != task) {
dprintk("RPC: %4d TCP write queue full (task %d)\n",
task->tk_pid, xprt->snd_task->tk_pid);
......@@ -1030,6 +1041,7 @@ xprt_down_transmit(struct rpc_task *task)
#endif
req->rq_bytes_sent = 0;
}
spin_unlock(&xprt_lock);
end_bh_atomic();
return xprt->snd_task == task;
}
......@@ -1132,11 +1144,12 @@ do_xprt_transmit(struct rpc_task *task)
if (status < 0)
break;
if (xprt->stream)
if (xprt->stream) {
req->rq_bytes_sent += status;
if (req->rq_bytes_sent >= req->rq_slen)
goto out_release;
if (req->rq_bytes_sent >= req->rq_slen)
goto out_release;
}
if (status < req->rq_slen)
status = -EAGAIN;
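Alongside the locking work, this hunk fixes a dangling-statement bug: without the braces, only req->rq_bytes_sent += status was guarded by if (xprt->stream), so the goto out_release test ran for datagram transports too, judging completion by a byte counter that is only meaningful for streams. A hedged reduction of before and after:

    /* Before: second test ran unconditionally. */
    if (xprt->stream)
            req->rq_bytes_sent += status;
    if (req->rq_bytes_sent >= req->rq_slen)
            goto out_release;

    /* After: both statements inside the guard. */
    if (xprt->stream) {
            req->rq_bytes_sent += status;
            if (req->rq_bytes_sent >= req->rq_slen)
                    goto out_release;
    }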
......@@ -1304,11 +1317,14 @@ xprt_reserve_status(struct rpc_task *task)
} else if (!RPCXPRT_CONGESTED(xprt) && xprt->free) {
/* OK: There's room for us. Grab a free slot and bump
* congestion value */
spin_lock(&xprt_lock);
if (!(req = xprt->free)) {
spin_unlock(&xprt_lock);
goto out_nofree;
}
xprt->free = req->rq_next;
req->rq_next = NULL;
spin_unlock(&xprt_lock);
xprt->cong += RPC_CWNDSCALE;
task->tk_rqstp = req;
xprt_request_init(task, xprt);
......@@ -1363,8 +1379,19 @@ xprt_release(struct rpc_task *task)
dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
spin_lock(&xprt_lock);
req->rq_next = xprt->free;
xprt->free = req;
spin_unlock(&xprt_lock);
/* remove slot from queue of pending */
start_bh_atomic();
if (task->tk_rpcwait) {
printk("RPC: task of released request still queued!\n");
rpc_del_timer(task);
rpc_remove_wait_queue(task);
}
end_bh_atomic();
/* Decrease congestion value. */
xprt->cong -= RPC_CWNDSCALE;
......