Commit 2f3247a1 authored by Patrick Mochel

Merge osdl.org:/home/mochel/src/kernel/devel/linux-2.5-virgin
into osdl.org:/home/mochel/src/kernel/devel/linux-2.5-sysfs

parents f85645c7 d103bdbf
@@ -670,12 +670,10 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
bit++;
} while (bit < __REQ_NR_BITS);
if (rq->flags & REQ_CMD)
printk("sector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
rq->nr_sectors,
rq->current_nr_sectors);
printk("\n");
printk("bio %p, biotail %p\n", rq->bio, rq->biotail);
}
void blk_recount_segments(request_queue_t *q, struct bio *bio)
@@ -1927,7 +1925,7 @@ inline void blk_recalc_rq_segments(struct request *rq)
inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (rq->flags & REQ_CMD) {
if (rq->bio) {
rq->hard_sector += nsect;
rq->nr_sectors = rq->hard_nr_sectors -= nsect;
rq->sector = rq->hard_sector;
@@ -1968,20 +1966,28 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
req->errors = 0;
if (!uptodate) {
printk("end_request: I/O error, dev %s, sector %llu\n",
kdevname(req->rq_dev), (unsigned long long)req->sector);
error = -EIO;
if (!(req->flags & REQ_QUIET))
printk("end_request: I/O error, dev %s, sector %llu\n",
kdevname(req->rq_dev),
(unsigned long long)req->sector);
}
while ((bio = req->bio)) {
const int nsect = bio_iovec(bio)->bv_len >> 9;
int new_bio = 0;
int new_bio = 0, nsect;
if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
printk("%s: bio idx %d >= vcnt %d\n", __FUNCTION__,
bio->bi_idx, bio->bi_vcnt);
break;
}
BIO_BUG_ON(bio_iovec(bio)->bv_len > bio->bi_size);
/*
* not a complete bvec done
*/
nsect = bio_iovec(bio)->bv_len >> 9;
if (unlikely(nsect > nr_sectors)) {
int partial = nr_sectors << 9;
......
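The arithmetic in the hunk above leans on 512-byte sectors: bv_len >> 9 turns a byte count into sectors, and nr_sectors << 9 turns a sector count back into bytes for a partial completion. A small standalone sketch of that conversion (plain userspace C; the values are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int bv_len = 4096;              /* one 4 KiB bio vector */
            unsigned int nsect = bv_len >> 9;        /* 4096 / 512 = 8 sectors */
            unsigned int nr_sectors = 3;             /* only 3 sectors completed */
            unsigned int partial = nr_sectors << 9;  /* 3 * 512 = 1536 bytes done */

            printf("vector holds %u sectors, %u bytes of it completed\n",
                   nsect, partial);
            return 0;
    }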
@@ -267,6 +267,7 @@
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/fcntl.h>
#include <linux/blkdev.h>
#include <asm/uaccess.h>
@@ -1436,6 +1437,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct inode *ip,
struct cdrom_device_ops *cdo = cdi->ops;
int ret;
/* Try the generic SCSI command ioctl's first.. */
ret = scsi_cmd_ioctl(ip->i_bdev, cmd, arg);
if (ret != -ENOTTY)
return ret;
/* the first few commands do not deal with audio drive_info, but
only with routines in cdrom device operations. */
switch (cmd) {
......
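cdrom_ioctl() above (and sd_ioctl() further down) uses the same dispatch idiom: hand the command to scsi_cmd_ioctl() first and only fall back to the driver-specific switch when it returns -ENOTTY. A hedged sketch of that shape; demo_fallback_ioctl() is a placeholder, not a real kernel function:

    /* Illustrative only: try the generic SCSI command ioctls, then fall back. */
    static int demo_ioctl(struct block_device *bdev, unsigned int cmd,
                          unsigned long arg)
    {
            int ret;

            ret = scsi_cmd_ioctl(bdev, cmd, arg);
            if (ret != -ENOTTY)
                    return ret;             /* handled, or a real error */

            return demo_fallback_ioctl(bdev, cmd, arg);  /* driver-specific path */
    }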
@@ -3026,14 +3026,7 @@ int ide_cdrom_ioctl (ide_drive_t *drive,
unsigned int cmd, unsigned long arg)
{
struct cdrom_info *info = drive->driver_data;
int error;
/* Try the generic SCSI command ioctl's first.. */
error = scsi_cmd_ioctl(inode->i_bdev, cmd, arg);
if (error != -ENOTTY)
return error;
/* Then the generic cdrom ioctl's.. */
return cdrom_ioctl(&info->devinfo, inode, cmd, arg);
}
......
@@ -30,14 +30,10 @@
* to do with card config are filled in after the card is detected.
*/
#define AIC7XXX { \
next: NULL, \
module: NULL, \
proc_info: aic7xxx_proc_info, \
name: NULL, \
detect: aic7xxx_detect, \
release: aic7xxx_release, \
info: aic7xxx_info, \
command: NULL, \
queuecommand: aic7xxx_queue, \
eh_strategy_handler: NULL, \
eh_abort_handler: NULL, \
......
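The aic7xxx hunk above simply deletes the members that were being set to NULL: with designated initializers, any member not mentioned is zero-initialized, so spelling out next: NULL, module: NULL and friends adds nothing. A tiny standalone sketch of that rule, using a made-up struct and the C99 .field = spelling rather than the older GNU field: style seen in the hunk:

    #include <stdio.h>

    struct demo_template {
            void *next;                     /* not listed below -> starts out NULL */
            const char *name;               /* not listed below -> starts out NULL */
            int (*detect)(void);
            int (*release)(void);
    };

    static int demo_detect(void)  { return 1; }
    static int demo_release(void) { return 0; }

    /* Only the interesting members are named; the rest default to 0/NULL. */
    static struct demo_template tmpl = {
            .detect  = demo_detect,
            .release = demo_release,
    };

    int main(void)
    {
            printf("next=%p name=%p detect()=%d\n",
                   tmpl.next, (const void *)tmpl.name, tmpl.detect());
            return 0;
    }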
@@ -857,7 +857,7 @@ void scsi_request_fn(request_queue_t * q)
scsi_init_cmd_from_req(SCpnt, SRpnt);
}
} else if (req->flags & REQ_CMD) {
} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
SRpnt = NULL;
STpnt = scsi_get_request_dev(req);
if (!STpnt) {
@@ -919,7 +919,7 @@ void scsi_request_fn(request_queue_t * q)
req = NULL;
spin_unlock_irq(q->queue_lock);
if (SCpnt->request->flags & REQ_CMD) {
if (SCpnt->request->flags & (REQ_CMD | REQ_BLOCK_PC)) {
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
......
@@ -62,16 +62,28 @@ int scsi_init_io(Scsi_Cmnd *SCpnt)
int count, gfp_mask;
/*
* First we need to know how many scatter gather segments are needed.
* non-sg block request. FIXME: check bouncing for isa hosts!
*/
count = req->nr_phys_segments;
if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
/*
* FIXME: isa bouncing
*/
if (SCpnt->host->unchecked_isa_dma)
goto fail;
SCpnt->request_bufflen = req->data_len;
SCpnt->request_buffer = req->data;
req->buffer = req->data;
SCpnt->use_sg = 0;
return 1;
}
/*
* we used to not use scatter-gather for single segment request,
* but now we do (it makes highmem I/O easier to support without
* kmapping pages)
*/
SCpnt->use_sg = count;
SCpnt->use_sg = req->nr_phys_segments;
gfp_mask = GFP_NOIO;
if (in_interrupt()) {
@@ -111,6 +123,7 @@ int scsi_init_io(Scsi_Cmnd *SCpnt)
/*
* kill it. there should be no leftover blocks in this request
*/
fail:
SCpnt = scsi_end_request(SCpnt, 0, req->nr_sectors);
BUG_ON(SCpnt);
return 0;
......
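Two points in the scsi_init_io() hunk are worth spelling out. A REQ_BLOCK_PC request with no bio carries a flat buffer in req->data/req->data_len and is passed through untouched, while everything else, even a single-segment request, now goes through a scatterlist, because a scatterlist entry can describe a highmem page without ever kmapping it. A hedged sketch of that one-entry scatterlist idea; the struct and helper below are simplified stand-ins, not the 2.5 SCSI API:

    /* Simplified stand-in for a scatter-gather entry, for illustration only. */
    struct demo_sg_entry {
            void            *page;          /* backing page, possibly highmem */
            unsigned int     offset;        /* where the data starts in that page */
            unsigned int     length;        /* bytes in this segment */
    };

    /*
     * Even a single-segment request gets a one-entry list: the adapter is handed
     * page + offset + length and can DMA to it directly, so no kernel virtual
     * mapping of the page is ever required.
     */
    static void demo_fill_single_sg(struct demo_sg_entry *sg, void *page,
                                    unsigned int offset, unsigned int length)
    {
            sg->page   = page;
            sg->offset = offset;
            sg->length = length;
    }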
@@ -191,6 +191,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
Scsi_Device *sdp = sdkp->device;
struct Scsi_Host *host;
int diskinfo[4];
int error;
SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
disk->disk_name, cmd));
@@ -206,6 +207,10 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
if( !scsi_block_when_processing_errors(sdp) )
return -ENODEV;
error = scsi_cmd_ioctl(inode->i_bdev, cmd, arg);
if (error != -ENOTTY)
return error;
switch (cmd)
{
case HDIO_GETGEO: /* Return BIOS disk parameters */
@@ -273,12 +278,41 @@ static struct gendisk **sd_disks;
**/
static int sd_init_command(Scsi_Cmnd * SCpnt)
{
int this_count;
int this_count, timeout;
struct gendisk *disk;
sector_t block;
Scsi_Device *sdp;
Scsi_Device *sdp = SCpnt->device;
timeout = SD_TIMEOUT;
if (SCpnt->device->type != TYPE_DISK)
timeout = SD_MOD_TIMEOUT;
/*
* these are already setup, just copy cdb basically
*/
if (SCpnt->request->flags & REQ_BLOCK_PC) {
struct request *rq = SCpnt->request;
if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = SCSI_DATA_WRITE;
else if (rq->data_len)
SCpnt->sc_data_direction = SCSI_DATA_READ;
else
SCpnt->sc_data_direction = SCSI_DATA_NONE;
this_count = rq->data_len;
if (rq->timeout)
timeout = rq->timeout;
goto queue;
}
/*
* don't support specials for now
* we only do REQ_CMD and REQ_BLOCK_PC
*/
if (!(SCpnt->request->flags & REQ_CMD))
return 0;
@@ -290,7 +324,6 @@ static int sd_init_command(Scsi_Cmnd * SCpnt)
SCSI_LOG_HLQUEUE(1, printk("sd_command_init: disk=%s, block=%llu, "
"count=%d\n", disk->disk_name, (unsigned long long)block, this_count));
sdp = SCpnt->device;
if (!sdp || !sdp->online ||
block + SCpnt->request->nr_sectors > get_capacity(disk)) {
SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
@@ -398,12 +431,12 @@ static int sd_init_command(Scsi_Cmnd * SCpnt)
* host adapter, it's safe to assume that we can at least transfer
* this many bytes between each connect / disconnect.
*/
queue:
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
SCpnt->timeout_per_command = (SCpnt->device->type == TYPE_DISK ?
SD_TIMEOUT : SD_MOD_TIMEOUT);
SCpnt->timeout_per_command = timeout;
/*
* This is the completion routine we use. This is matched in terms
......
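In the REQ_BLOCK_PC branch added to sd_init_command() above (and in the identical branch added to sr_init_command() below), the CDB, buffer length, data direction and timeout all arrive prepackaged in the request, so the driver only copies them into the Scsi_Cmnd and jumps to the shared queue: label. A hedged sketch of what a caller would be expected to have filled in beforehand; the helper is hypothetical, the field names follow the hunks, and rq_data_dir() really is bit 0 of rq->flags per the blkdev.h hunk at the end:

    /*
     * Illustrative only: the fields a prebuilt block-pc request carries before
     * it reaches sd_init_command()/sr_init_command().  Allocation and queueing
     * of rq are assumed to happen elsewhere.
     */
    static void demo_prepare_block_pc(struct request *rq, void *buf,
                                      unsigned int len, int write)
    {
            memset(rq->cmd, 0, sizeof(rq->cmd));
            rq->cmd[0] = INQUIRY;           /* example: a ready-made SCSI CDB */
            rq->cmd[4] = len;               /* INQUIRY allocation length (<= 255) */

            rq->flags = REQ_BLOCK_PC;
            if (write)
                    rq->flags |= 1;         /* rq_data_dir() tests bit 0 */

            rq->data     = buf;             /* flat buffer, no bio attached */
            rq->data_len = len;             /* becomes request_bufflen */
            rq->timeout  = 60 * HZ;         /* overrides SD_TIMEOUT/SR_TIMEOUT */
    }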
@@ -247,7 +247,7 @@ static void rw_intr(Scsi_Cmnd * SCpnt)
static int sr_init_command(Scsi_Cmnd * SCpnt)
{
int block=0, this_count, s_size;
int block=0, this_count, s_size, timeout = SR_TIMEOUT;
Scsi_CD *cd = SCpnt->request->rq_disk->private_data;
SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %s, block = %d\n", disk->disk_name, block));
@@ -266,6 +266,30 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
return 0;
}
/*
* these are already setup, just copy cdb basically
*/
if (SCpnt->request->flags & REQ_BLOCK_PC) {
struct request *rq = SCpnt->request;
if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = SCSI_DATA_WRITE;
else if (rq->data_len)
SCpnt->sc_data_direction = SCSI_DATA_READ;
else
SCpnt->sc_data_direction = SCSI_DATA_NONE;
this_count = rq->data_len;
if (rq->timeout)
timeout = rq->timeout;
goto queue;
}
if (!(SCpnt->request->flags & REQ_CMD)) {
blk_dump_rq_flags(SCpnt->request, "sr unsup command");
return 0;
@@ -336,11 +360,12 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
* host adapter, it's safe to assume that we can at least transfer
* this many bytes between each connect / disconnect.
*/
queue:
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
SCpnt->timeout_per_command = SR_TIMEOUT;
SCpnt->timeout_per_command = timeout;
/*
* This is the completion routine we use. This is matched in terms
......
@@ -1793,16 +1793,12 @@ static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
hcb_p np = 0;
int retv;
for (host = first_host; host; host = host->next) {
if (host->hostt != first_host->hostt)
continue;
if (host->host_no == hostno) {
host = scsi_host_hn_get(hostno);
if (!host)
return -EINVAL;
host_data = (struct host_data *) host->hostdata;
np = host_data->ncb;
break;
}
}
if (!np)
return -EINVAL;
@@ -1823,6 +1819,7 @@ static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
#endif
}
scsi_host_put(host);
return retv;
}
#endif /* SYM_LINUX_PROC_INFO_SUPPORT */
......
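The sym53c8xx_proc_info() change above replaces an open-coded walk of the global host list with a reference-counted lookup: scsi_host_hn_get() takes a reference on the host with the matching number and scsi_host_put() drops it before returning. A hedged sketch of the get/use/put shape; the work done in the middle is a placeholder:

    /* Illustrative only: look up a host by number, use it, drop the reference. */
    static int demo_use_host(int hostno)
    {
            struct Scsi_Host *host;
            int retv;

            host = scsi_host_hn_get(hostno);        /* takes a reference, or NULL */
            if (!host)
                    return -EINVAL;

            retv = demo_do_work(host);              /* hypothetical work on the host */

            scsi_host_put(host);                    /* always balance the get */
            return retv;
    }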
@@ -129,11 +129,7 @@ static int kafscmd(void *arg)
/* only certain signals are of interest */
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked,0);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
recalc_sigpending();
#else
recalc_sigpending(current);
#endif
spin_unlock_irq(&current->sig->siglock);
/* loop around looking for things to attend to */
@@ -360,6 +356,9 @@ void afscm_stop(void)
rxrpc_call_abort(call,-ESRCH); /* abort, dequeue and put */
_debug("nuking active call %08x.%d",
ntohl(call->conn->conn_id),ntohl(call->call_id));
rxrpc_put_call(call);
rxrpc_put_call(call);
spin_lock(&afscm_calls_lock);
......
@@ -235,6 +235,7 @@ struct request_queue
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_empty(q) elv_queue_empty(q)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) ((rq)->flags & 1)
......
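The new blk_pc_request() macro sits next to blk_fs_request() so drivers can classify requests without open-coding the flag masks. A hedged sketch of how a request handler might use the pair; the demo_* handlers are placeholders:

    /* Illustrative only: classify a request with the blkdev.h helpers. */
    static void demo_handle_request(struct request *rq)
    {
            if (blk_fs_request(rq))         /* REQ_CMD: ordinary fs read/write */
                    demo_do_rw(rq);
            else if (blk_pc_request(rq))    /* REQ_BLOCK_PC: prepackaged SCSI CDB */
                    demo_do_packet(rq);
            else                            /* anything else is unexpected here */
                    blk_dump_rq_flags(rq, "demo: unsupported request type");
    }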